Subversion Repositories: shark

Compare Revisions

Rev 656 → Rev 657

/shark/trunk/kernel/activate.c
18,11 → 18,11
 
/**
------------
CVS : $Id: activate.c,v 1.7 2004-03-10 14:51:42 giacomo Exp $
CVS : $Id: activate.c,v 1.8 2004-05-17 15:03:51 anton Exp $
 
File: $File$
Revision: $Revision: 1.7 $
Last update: $Date: 2004-03-10 14:51:42 $
Revision: $Revision: 1.8 $
Last update: $Date: 2004-05-17 15:03:51 $
------------
 
task_activate & group_activate
70,6 → 70,7
int task_activate(PID p)
{
LEVEL l; /* the level of the task p */
struct timespec t;
 
/* some controls on the task p */
if (p<0 || p>=MAX_PROC) {
81,6 → 82,8
return -1;
}
 
kern_gettime(&t);
 
/*+ if we are calling the runlevel functions the system is
into the global_context... we only have to call
the task_activate of the level +*/
91,7 → 94,7
proc_table[p].frozen_activations++;
else {
l = proc_table[p].task_level;
level_table[l]->public_activate(l,p);
level_table[l]->public_activate(l,p,&t);
}
kern_frestore(f);
return 0;
106,7 → 109,7
proc_table[p].frozen_activations++;
else {
l = proc_table[p].task_level;
level_table[l]->public_activate(l,p);
level_table[l]->public_activate(l,p,&t);
event_need_reschedule();
}
kern_frestore(f);
120,7 → 123,7
/* tracer stuff */
TRACER_LOGEVENT(FTrace_EVT_task_activate,(unsigned short int)proc_table[p].context,0);
l = proc_table[p].task_level;
level_table[l]->public_activate(l,p);
level_table[l]->public_activate(l,p,&t);
/* Preempt if necessary */
scheduler();
140,6 → 143,7
{
PID i; /* a counter */
register LEVEL l; /* a level value */
struct timespec t;
 
if (g == 0) {
errno = EINVALID_GROUP;
146,6 → 150,8
return -1;
}
 
kern_gettime(&t);
 
/*+ if we are calling the runlevel functions the system is
into the global_context... we only have to call
the task_activate of the level +*/
162,7 → 168,7
/* tracer stuff */
TRACER_LOGEVENT(FTrace_EVT_task_activate,(unsigned short int)proc_table[i].context,0);
l = proc_table[i].task_level;
level_table[l]->public_activate(l,i);
level_table[l]->public_activate(l,i,&t);
}
 
kern_frestore(f);
181,7 → 187,7
/* tracer stuff */
TRACER_LOGEVENT(FTrace_EVT_task_activate,(unsigned short int)proc_table[i].context,0);
l = proc_table[i].task_level;
level_table[l]->public_activate(l,i);
level_table[l]->public_activate(l,i,&t);
event_need_reschedule();
}
kern_frestore(f);
196,7 → 202,7
continue;
}
l = proc_table[i].task_level;
level_table[l]->public_activate(l,i);
level_table[l]->public_activate(l,i,&t);
/* tracer stuff */
TRACER_LOGEVENT(FTrace_EVT_task_activate,(unsigned short int)proc_table[i].context,0);
}
/shark/trunk/kernel/init.c
18,11 → 18,11
 
/**
------------
CVS : $Id: init.c,v 1.3 2003-04-16 17:18:15 giacomo Exp $
CVS : $Id: init.c,v 1.4 2004-05-17 15:03:51 anton Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2003-04-16 17:18:15 $
Revision: $Revision: 1.4 $
Last update: $Date: 2004-05-17 15:03:51 $
------------
 
- Kernel module registration functions
226,7 → 226,7
(int (*)(LEVEL,PID)) level_return0, /* public_eligible */
(void (*)(LEVEL,PID, int)) level_excfunc, /* public_dispatch */
(void (*)(LEVEL,PID)) level_excfunc, /* public_epilogue */
(void (*)(LEVEL,PID)) level_excfunc, /* public_activate */
(void (*)(LEVEL,PID,struct timespec *))level_excfunc, /* public_activate */
(void (*)(LEVEL,PID)) level_excfunc, /* public_unblock */
(void (*)(LEVEL,PID)) level_excfunc, /* public_block */
(int (*)(LEVEL,PID,void *)) level_excfunc, /* public_message */
/shark/trunk/kernel/modules/edf.c
20,11 → 20,11
 
/**
------------
CVS : $Id: edf.c,v 1.13 2004-03-10 14:51:43 giacomo Exp $
CVS : $Id: edf.c,v 1.14 2004-05-17 15:03:51 anton Exp $
 
File: $File$
Revision: $Revision: 1.13 $
Last update: $Date: 2004-03-10 14:51:43 $
Revision: $Revision: 1.14 $
Last update: $Date: 2004-05-17 15:03:51 $
------------
 
This file contains the scheduling module EDF (Earliest Deadline First)
60,104 → 60,180
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
//#define EDF_DEBUG
#define edf_printf kern_printf
 
/*+ Status used in the level +*/
#define EDF_READY MODULE_STATUS_BASE /*+ - Ready status +*/
#define EDF_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/
#define EDF_WAIT MODULE_STATUS_BASE+3 /*+ to wait the deadline +*/
#define EDF_IDLE MODULE_STATUS_BASE+4 /*+ to wait the deadline +*/
#define EDF_ZOMBIE MODULE_STATUS_BASE+5 /*+ to wait the free time +*/
#ifdef EDF_DEBUG
char *pnow() {
static char buf[40];
struct timespec t;
sys_gettime(&t);
sprintf(buf, "%ld.%06ld", t.tv_sec, t.tv_nsec/1000);
return buf;
}
char *ptime1(struct timespec *t) {
static char buf[40];
sprintf(buf, "%ld.%06ld", t->tv_sec, t->tv_nsec/1000);
return buf;
}
char *ptime2(struct timespec *t) {
static char buf[40];
sprintf(buf, "%ld.%06ld", t->tv_sec, t->tv_nsec/1000);
return buf;
}
#endif
 
/*+ flags +*/
#define EDF_FLAG_SPORADIC 1
#define EDF_FLAG_NORAISEEXC 2
#define EDF_FLAG_SLEEP 4
/* statuses used in the level */
#define EDF_READY MODULE_STATUS_BASE /* ready */
#define EDF_IDLE MODULE_STATUS_BASE+1 /* idle, waiting for offset/eop */
#define EDF_WAIT MODULE_STATUS_BASE+2 /* to sleep, waiting for eop */
#define EDF_ZOMBIE MODULE_STATUS_BASE+3 /* zombie, waiting for eop */
 
/*+ the level redefinition for the Earliest Deadline First level +*/
typedef struct {
level_des l; /*+ the standard level descriptor +*/
/* task flags */
#define EDF_FLAG_SPORADIC 1 /* the task is sporadic */
#define EDF_FLAG_SPOR_LATE 2 /* sporadic task with period overrun */
 
TIME period[MAX_PROC]; /*+ The task periods; the deadlines are
stored in the priority field +*/
int deadline_timer[MAX_PROC];
/*+ The task deadline timers +*/
 
int flag[MAX_PROC];
/*+ used to manage the JOB_TASK_MODEL and the
periodicity +*/
/* the level redefinition for the Earliest Deadline First level */
typedef struct {
level_des l; /* standard level descriptor */
IQUEUE ready; /* the ready queue */
int flags; /* level flags */
bandwidth_t U; /* used bandwidth */
 
IQUEUE ready; /*+ the ready queue +*/
int taskflags[MAX_PROC]; /* task flags */
TIME period[MAX_PROC]; /* task period */
TIME rdeadline[MAX_PROC]; /* task relative deadlines */
TIME offset[MAX_PROC]; /* task release offsets */
struct timespec release[MAX_PROC]; /* release time of the task */
struct timespec adeadline[MAX_PROC]; /* latest assigned deadline
(needed to correctly assign deadlines to queued activations) */
int dl_timer[MAX_PROC]; /* deadline overrun timer */
int eop_timer[MAX_PROC]; /* end of period timer */
int dl_miss[MAX_PROC]; /* deadline miss counter */
int wcet_miss[MAX_PROC]; /* WCET miss counter */
int nact[MAX_PROC]; /* number of pending periodic jobs */
int nskip[MAX_PROC]; /* number of skipped sporadic jobs */
} EDF_level_des;
 
int flags; /*+ the init flags... +*/
 
bandwidth_t U; /*+ the used bandwidth +*/
static void EDF_timer_endperiod(void *par);
 
} EDF_level_des;
 
/* This function is called when a task misses its deadline */
 
static void EDF_timer_deadline(void *par)
{
PID p = (PID) par;
EDF_level_des *lev;
struct timespec *temp;
lev = (EDF_level_des *)level_table[proc_table[p].task_level];
TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0);
 
if (lev->flags & EDF_ENABLE_DL_EXCEPTION) {
kern_raise(XDEADLINE_MISS,p);
} else {
lev->dl_miss[p]++;
}
}
 
 
/* Release (or queue) task, post deadline and endperiod timers.
The release time is stored in lev->release[p]. */
 
static void EDF_intern_release(PID p, EDF_level_des *lev)
{
struct timespec temp;
 
/* post deadline timer */
if (lev->flags & EDF_ENABLE_DL_CHECK) {
temp = lev->release[p];
ADDUSEC2TIMESPEC(lev->rdeadline[p], &temp);
lev->dl_timer[p] = kern_event_post(&temp,EDF_timer_deadline,(void *)p);
}
 
/* release or queue next job */
if (proc_table[p].status == EDF_IDLE) {
/* assign deadline, insert task in the ready queue */
proc_table[p].status = EDF_READY;
*iq_query_timespec(p,&lev->ready) = lev->adeadline[p];
iq_timespec_insert(p,&lev->ready);
#ifdef EDF_DEBUG
edf_printf("At %s: releasing %s with deadline %s\n", pnow(),
proc_table[p].name, ptime1(&lev->adeadline[p]));
#endif
/* increase assigned deadline */
ADDUSEC2TIMESPEC(lev->period[p], &lev->adeadline[p]);
/* reschedule */
event_need_reschedule();
} else {
/* queue */
lev->nact[p]++;
}
 
/* increase release time */
ADDUSEC2TIMESPEC(lev->period[p],&lev->release[p]);
/* post end of period timer */
lev->eop_timer[p] = kern_event_post(&lev->release[p],
EDF_timer_endperiod,(void *)p);
 
TRACER_LOGEVENT(FTrace_EVT_task_timer,(unsigned short int)proc_table[p].context,(unsigned int)proc_table[p].task_level);
}
 
 
/* Release after an offset */
 
static void EDF_timer_offset(void *par)
{
PID p = (PID) par;
EDF_level_des *lev;
lev = (EDF_level_des *)level_table[proc_table[p].task_level];
 
#ifdef EDF_DEBUG
edf_printf("(EDF:Dl TIMER:%d)",p);
#endif
EDF_intern_release(p, lev);
}
 
switch (proc_table[p].status) {
case EDF_ZOMBIE:
/* we finally put the task in the ready queue */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* and free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet;
break;
 
case EDF_IDLE:
/* tracer stuff */
TRACER_LOGEVENT(FTrace_EVT_task_timer,(unsigned short int)proc_table[p].context,(unsigned int)proc_table[p].task_level);
/* similar to EDF_task_activate */
temp = iq_query_timespec(p,&lev->ready);
ADDUSEC2TIMESPEC(lev->period[p], temp);
proc_table[p].status = EDF_READY;
iq_timespec_insert(p,&lev->ready);
lev->deadline_timer[p] = kern_event_post(temp,
EDF_timer_deadline,
(void *)p);
event_need_reschedule();
break;
/* This function is called at the end of the period */
 
case EDF_WAIT:
/* Without this, the task cannot be reactivated!!! */
proc_table[p].status = SLEEP;
static void EDF_timer_endperiod(void *par)
{
PID p = (PID) par;
EDF_level_des *lev;
lev = (EDF_level_des *)level_table[proc_table[p].task_level];
 
/* Reset the EDF_FLAG_SLEEP */
lev->flag[p] &= ~EDF_FLAG_SLEEP;
lev->eop_timer[p] = -1;
 
break;
if (proc_table[p].status == EDF_ZOMBIE) {
/* put the task in the FREE state */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->rdeadline[p]) * proc_table[p].wcet;
return;
}
 
default:
/* else, a deadline miss occurred!!! */
TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0);
kern_raise(XDEADLINE_MISS,p);
if (proc_table[p].status == EDF_WAIT) {
proc_table[p].status = SLEEP;
return;
}
if (!(lev->taskflags[p] & EDF_FLAG_SPORADIC)) {
/* if the task is periodic, rerelease it (now or later) */
EDF_intern_release(p, lev);
} else {
/* the sporadic task is still busy. mark it as late */
lev->taskflags[p] |= EDF_FLAG_SPOR_LATE;
}
}
 
 
/* This function is called when a guest task misses its deadline */
 
static void EDF_timer_guest_deadline(void *par)
{
PID p = (PID) par;
 
#ifdef EDF_DEBUG
edf_printf("(EDF:AAARRRGGGHHH!!!)");
#endif
TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0);
kern_raise(XDEADLINE_MISS,p);
}
191,57 → 267,68
if (m->level != 0 && m->level != l) return -1;
h = (HARD_TASK_MODEL *)m;
if (!h->wcet || !h->mit) return -1;
if (h->drel > h->mit) return -1; /* only D <= T supported */
 
if (!h->drel) {
lev->rdeadline[p] = h->mit;
} else {
lev->rdeadline[p] = h->drel;
}
 
/* check the free bandwidth... */
if (lev->flags & EDF_ENABLE_GUARANTEE) {
bandwidth_t b;
b = (MAX_BANDWIDTH / h->mit) * h->wcet;
b = (MAX_BANDWIDTH / lev->rdeadline[p]) * h->wcet;
 
/* really update lev->U, checking an overflow... */
if (MAX_BANDWIDTH - lev->U > b)
if (MAX_BANDWIDTH - lev->U > b) {
lev->U += b;
else
} else {
return -1;
}
}
 
/* now we know that m is a valid model */
if (lev->flags & EDF_ENABLE_WCET_EXCEPTION) {
lev->flags |= EDF_ENABLE_WCET_CHECK;
}
if (lev->flags & EDF_ENABLE_DL_EXCEPTION) {
lev->flags |= EDF_ENABLE_DL_CHECK;
}
 
#ifdef EDF_DEBUG
edf_printf("(EDF:PubCrt:%d)", p);
#endif
 
lev->period[p] = h->mit;
if (lev->rdeadline[p] == lev->period[p]) {
/* Ensure that D <= T-eps to make dl_timer trigger before rel_timer */
lev->rdeadline[p] = lev->period[p] - 1;
}
lev->flag[p] = 0;
lev->taskflags[p] = 0;
 
if (h->periodicity == APERIODIC)
lev->flag[p] |= EDF_FLAG_SPORADIC;
lev->taskflags[p] |= EDF_FLAG_SPORADIC;
lev->deadline_timer[p] = -1;
lev->dl_timer[p] = -1;
lev->eop_timer[p] = -1;
 
/* Enable wcet check */
if (lev->flags & EDF_ENABLE_WCET_CHECK) {
proc_table[p].avail_time = h->wcet;
proc_table[p].wcet = h->wcet;
proc_table[p].control |= CONTROL_CAP;
proc_table[p].control |= CONTROL_CAP; /* turn on measurement */
}
 
lev->offset[p] = h->offset;
 
NULL_TIMESPEC(&lev->release[p]);
 
return 0; /* OK, also if the task cannot be guaranteed... */
}
 
static void EDF_public_detach(LEVEL l, PID p)
{
/* the EDF level doesn't introduce any dynamically allocated new field.
we only have to decrement the allocated bandwidth */
 
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
#ifdef EDF_DEBUG
edf_printf("(EDF:PubDet:%d)", p);
#endif
 
if (lev->flags & EDF_ENABLE_GUARANTEE) {
lev->U -= (MAX_BANDWIDTH / lev->period[p]) * proc_table[p].wcet;
lev->U -= (MAX_BANDWIDTH / lev->rdeadline[p]) * proc_table[p].wcet;
}
}
 
248,14 → 335,6
static void EDF_public_dispatch(LEVEL l, PID p, int nostop)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
#ifdef EDF_DEBUG
edf_printf("(EDF:PubDsp:%d)",p);
#endif
 
/* the task state is set EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
iq_extract(p, &lev->ready);
}
 
263,65 → 342,67
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
#ifdef EDF_DEBUG
edf_printf("(EDF:PubEpi:%d)",p);
#endif
 
/* check if the wcet is finished... */
if ((lev->flags & EDF_ENABLE_WCET_CHECK) && proc_table[p].avail_time <= 0) {
/* if it is, raise a XWCET_VIOLATION exception */
TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,(unsigned short int)proc_table[p].context,0);
kern_raise(XWCET_VIOLATION,p);
proc_table[p].status = EDF_WCET_VIOLATED;
if (lev->flags & EDF_ENABLE_WCET_CHECK) {
if (proc_table[p].avail_time <= 0) {
TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,(unsigned short int)proc_table[p].context,0);
if (lev->flags & EDF_ENABLE_WCET_EXCEPTION) {
kern_raise(XWCET_VIOLATION,p);
} else {
proc_table[p].control &= ~CONTROL_CAP;
lev->wcet_miss[p]++;
}
}
}
else {
/* the task has been preempted. it returns into the ready queue... */
iq_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_READY;
}
 
/* the task returns to the ready queue */
iq_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_READY;
 
}
 
static void EDF_public_activate(LEVEL l, PID p)
static void EDF_public_activate(LEVEL l, PID p, struct timespec *t)
{
struct timespec clocktime;
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
struct timespec *temp;
 
#ifdef EDF_DEBUG
edf_printf("(EDF:PubAct:%d)", p);
#endif
kern_gettime(&clocktime);
 
if (lev->flag[p] & EDF_FLAG_SLEEP) {
lev->flag[p] &= ~EDF_FLAG_SLEEP;
if (!(lev->flag[p] & EDF_FLAG_SPORADIC))
proc_table[p].status = EDF_IDLE;
/* check if we are not in the SLEEP state */
if (proc_table[p].status != SLEEP) {
if (lev->flags & EDF_ENABLE_ACT_EXCEPTION) {
/* too frequent or wrongful activation: raise exception */
kern_raise(XACTIVATION,p);
} else {
/* skip the sporadic job, but increase a counter */
#ifdef EDF_DEBUG
edf_printf("At %s: activation of %s skipped\n", pnow(), proc_table[p].name);
#endif
lev->nskip[p]++;
}
return;
}
 
if (proc_table[p].status == EDF_WAIT) {
kern_raise(XACTIVATION,p);
return;
}
/* Test if we are trying to activate a non sleeping task */
/* Ignore this; the task is already active */
if (proc_table[p].status != SLEEP &&
proc_table[p].status != EDF_WCET_VIOLATED)
return;
/* set the release time to the activation time + offset */
lev->release[p] = *t;
ADDUSEC2TIMESPEC(lev->offset[p], &lev->release[p]);
 
/* set the absolute deadline to the activation time + offset + rdeadline */
lev->adeadline[p] = lev->release[p];
ADDUSEC2TIMESPEC(lev->rdeadline[p], &lev->adeadline[p]);
 
/* see also EDF_timer_deadline */
temp = iq_query_timespec(p, &lev->ready);
kern_gettime(temp);
ADDUSEC2TIMESPEC(lev->period[p], temp);
/* Check if release > clocktime. If so, release it later,
otherwise release it now. */
 
/* Insert task in the correct position */
proc_table[p].status = EDF_READY;
iq_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_IDLE;
 
/* Set the deadline timer */
lev->deadline_timer[p] = kern_event_post(temp,
EDF_timer_deadline,
(void *)p);
if (TIMESPEC_A_GT_B(&lev->release[p], &clocktime)) {
/* release later */
kern_event_post(&lev->release[p],EDF_timer_offset,(void *)p);
} else {
/* release now */
EDF_intern_release(p, lev);
}
}
 
static void EDF_public_unblock(LEVEL l, PID p)
328,10 → 409,7
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
/* Similar to EDF_task_activate,
but we don't check in what state the task is */
 
/* Insert task in the coEDFect position */
/* Insert task in the correct position */
proc_table[p].status = EDF_READY;
iq_timespec_insert(p,&lev->ready);
}
353,53 → 431,86
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
/* task_message evaluation */
switch((long)(m)) {
 
/* task_endcycle */
case (long)(NULL):
 
#ifdef EDF_DEBUG
edf_printf("(EDF:EndCyc:%d)",p);
#endif
 
/* the task has terminated his job before it consume the wcet. All OK! */
if (!(lev->flag[p] & EDF_FLAG_SPORADIC) &&
!(lev->flag[p] & EDF_FLAG_SLEEP))
proc_table[p].status = EDF_IDLE;
else
proc_table[p].status = EDF_WAIT;
 
/* we reset the capacity counters... */
if (lev->flags & EDF_ENABLE_WCET_CHECK)
proc_table[p].avail_time = proc_table[p].wcet;
 
jet_update_endcycle(); /* Update the Jet data... */
TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);
 
break;
 
/* task_disable */
case 1:
 
#ifdef EDF_DEBUG
edf_printf("(EDF:Dis:%d)",p);
#endif
 
/* Set the EDF_FLAG_SLEEP, in the next endcycle the task will
be set in EDF_WAIT */
lev->flag[p] |= EDF_FLAG_SLEEP;
 
/* If the task is EDF_IDLE, set to EDF_WAIT now */
if (proc_table[p].status == EDF_IDLE)
/* task_endcycle() */
case 0:
/* if there are no pending jobs */
if (lev->nact[p] == 0) {
/* remove deadline timer, if any */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
}
if (lev->taskflags[p] & EDF_FLAG_SPORADIC) {
/* sporadic task */
if (!(lev->taskflags[p] & EDF_FLAG_SPOR_LATE)) {
proc_table[p].status = EDF_WAIT;
} else {
/* it's late, move it directly to SLEEP */
proc_table[p].status = SLEEP;
lev->taskflags[p] &= ~EDF_FLAG_SPOR_LATE;
}
} else {
/* periodic task */
proc_table[p].status = EDF_IDLE;
}
} else {
/* we are late / there are pending jobs */
lev->nact[p]--;
/* compute and assign absolute deadline */
*iq_query_timespec(p,&lev->ready) = lev->adeadline[p];
iq_timespec_insert(p,&lev->ready);
/* increase assigned deadline */
ADDUSEC2TIMESPEC(lev->period[p], &lev->adeadline[p]);
#ifdef EDF_DEBUG
edf_printf("(Late) At %s: releasing %s with deadline %s\n",
pnow(),proc_table[p].name,ptime1(&lev->adeadline[p]));
#endif
}
break;
/* task_sleep() */
case 1:
/* remove deadline timer, if any */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
}
if (lev->taskflags[p] & EDF_FLAG_SPORADIC) {
/* sporadic task */
if (!(lev->taskflags[p] & EDF_FLAG_SPOR_LATE)) {
proc_table[p].status = EDF_WAIT;
} else {
/* it's late, move it directly to SLEEP */
proc_table[p].status = SLEEP;
lev->taskflags[p] &= ~EDF_FLAG_SPOR_LATE;
}
} else {
/* periodic task */
if (!(lev->nact[p] > 0)) {
/* we are on time. go to the EDF_WAIT state */
proc_table[p].status = EDF_WAIT;
} else {
/* we are late. delete pending activations and go to SLEEP */
lev->nact[p] = 0;
proc_table[p].status = SLEEP;
/* remove end of period timer */
if (lev->eop_timer[p] != -1) {
kern_event_delete(lev->eop_timer[p]);
lev->eop_timer[p] = -1;
}
}
}
break;
}
 
TRACER_LOGEVENT(FTrace_EVT_task_disable,(unsigned short int)proc_table[p].context,(unsigned int)l);
 
break;
 
if (lev->flags & EDF_ENABLE_WCET_CHECK) {
proc_table[p].control |= CONTROL_CAP;
}
 
proc_table[p].avail_time = proc_table[p].wcet;
jet_update_endcycle(); /* Update the Jet data... */
TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);
return 0;
 
}
406,10 → 517,22
 
static void EDF_public_end(LEVEL l, PID p)
{
proc_table[p].status = EDF_ZOMBIE;
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
/* When the deadline timer fire, it put the task descriptor in
the free queue, and free the allocated bandwidth... */
if (!(lev->taskflags[p] & EDF_FLAG_SPOR_LATE)) {
/* remove the deadline timer (if any) */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
}
proc_table[p].status = EDF_ZOMBIE;
} else {
/* no endperiod timer will be fired, free the task now! */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->rdeadline[p]) * proc_table[p].wcet;
}
}
 
static void EDF_private_insert(LEVEL l, PID p, TASK_MODEL *m)
429,18 → 552,13
iq_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_READY;
lev->deadline_timer[p] = -1;
lev->dl_timer[p] = -1;
 
lev->period[p] = job->period;
 
/* Set the deadline timer */
if (job->noraiseexc)
lev->flag[p] = EDF_FLAG_NORAISEEXC;
else {
lev->flag[p] = 0;
lev->deadline_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
EDF_timer_guest_deadline,
(void *)p);
if (!job->noraiseexc) {
lev->dl_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
EDF_timer_guest_deadline,(void *)p);
}
}
 
471,18 → 589,16
iq_extract(p, &lev->ready);
 
/* we remove the deadline timer, because the slice is finished */
if (lev->deadline_timer[p] != NIL) {
kern_event_delete(lev->deadline_timer[p]);
lev->deadline_timer[p] = NIL;
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
}
 
}
 
 
/* Registration functions */
 
/*+ Registration function:
int flags the init flags ... see edf.h +*/
/* Registration function:
int flags the init flags ... see edf.h */
LEVEL EDF_register_level(int flags)
{
LEVEL l; /* the level that we register */
520,9 → 636,14
 
/* fill the EDF descriptor part */
for(i=0; i<MAX_PROC; i++) {
lev->period[i] = 0;
lev->deadline_timer[i] = -1;
lev->flag[i] = 0;
lev->period[i] = 0;
lev->dl_timer[i] = -1;
lev->eop_timer[i] = -1;
lev->taskflags[i] = 0;
lev->dl_miss[i] = 0;
lev->wcet_miss[i] = 0;
lev->nact[i] = 0;
lev->nskip[i] = 0;
}
 
iq_init(&lev->ready, &freedesc, 0);
539,3 → 660,35
return lev->U;
}
 
int EDF_get_nact(PID p)
{
LEVEL l = proc_table[p].task_level;
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
return lev->nact[p];
}
 
int EDF_get_dl_miss(PID p)
{
LEVEL l = proc_table[p].task_level;
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
return lev->dl_miss[p];
}
 
int EDF_get_wcet_miss(PID p)
{
LEVEL l = proc_table[p].task_level;
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
return lev->wcet_miss[p];
}
 
int EDF_get_nskip(PID p)
{
LEVEL l = proc_table[p].task_level;
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
return lev->nskip[p];
}
 
/shark/trunk/kernel/modules/posix.c
20,11 → 20,11
 
/**
------------
CVS : $Id: posix.c,v 1.8 2004-03-10 14:51:43 giacomo Exp $
CVS : $Id: posix.c,v 1.9 2004-05-17 15:03:52 anton Exp $
 
File: $File$
Revision: $Revision: 1.8 $
Last update: $Date: 2004-03-10 14:51:43 $
Revision: $Revision: 1.9 $
Last update: $Date: 2004-05-17 15:03:52 $
------------
 
This file contains the scheduling module compatible with POSIX
210,7 → 210,7
proc_table[p].status = POSIX_READY;
}
 
static void POSIX_public_activate(LEVEL l, PID p)
static void POSIX_public_activate(LEVEL l, PID p, struct timespec *t)
{
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);
 
311,7 → 311,7
if (p == NIL)
printk("\nPanic!!! can't create main task...\n");
 
POSIX_public_activate(lev,p);
POSIX_public_activate(lev,p,NULL);
}
 
 
/shark/trunk/kernel/modules/srp.c
20,11 → 20,11
 
/**
------------
CVS : $Id: srp.c,v 1.7 2004-03-10 14:51:44 giacomo Exp $
CVS : $Id: srp.c,v 1.8 2004-05-17 15:03:52 anton Exp $
 
File: $File$
Revision: $Revision: 1.7 $
Last update: $Date: 2004-03-10 14:51:44 $
Revision: $Revision: 1.8 $
Last update: $Date: 2004-05-17 15:03:52 $
------------
 
Stack Resource Policy. see srp.h for general details...
707,8 → 707,10
 
/* activate the task if it was activated while in lobby list! */
if (task_unblock_activation(x)) {
struct timespec t;
LEVEL sl = proc_table[x].task_level;
level_table[sl]->public_activate(sl,x);
kern_gettime(&t);
level_table[sl]->public_activate(sl,x,&t);
// kern_printf("activate it!!!");
}
}
/shark/trunk/kernel/modules/rr2.c
20,11 → 20,11
 
/**
------------
CVS : $Id: rr2.c,v 1.7 2004-03-10 14:51:44 giacomo Exp $
CVS : $Id: rr2.c,v 1.8 2004-05-17 15:03:52 anton Exp $
 
File: $File$
Revision: $Revision: 1.7 $
Last update: $Date: 2004-03-10 14:51:44 $
Revision: $Revision: 1.8 $
Last update: $Date: 2004-05-17 15:03:52 $
------------
 
This file contains the scheduling module RR2 (Round Robin) version 2
167,7 → 167,7
proc_table[p].status = RR2_READY;
}
 
static void RR2_public_activate(LEVEL l, PID p)
static void RR2_public_activate(LEVEL l, PID p, struct timespec *t)
{
RR2_level_des *lev = (RR2_level_des *)(level_table[l]);
 
265,7 → 265,7
if (p == NIL)
printk("\nPanic!!! can't create main task...\n");
 
RR2_public_activate(lev,p);
RR2_public_activate(lev,p,NULL);
}
 
 
/shark/trunk/kernel/modules/hardcbs.c
358,10 → 358,9
private_epilogue(lev->scheduling_level,p);
}
 
static void HCBS_public_activate(LEVEL l, PID p)
static void HCBS_public_activate(LEVEL l, PID p, struct timespec *t)
{
HCBS_level_des *lev = (HCBS_level_des *)(level_table[l]);
struct timespec t;
 
if (lev->flag[p] & HCBS_SLEEP) {
lev->flag[p] &= ~HCBS_SLEEP;
375,10 → 374,8
return;
}
 
kern_gettime(&t);
HCBS_activation(lev, p, t);
 
HCBS_activation(lev, p, &t);
 
/* Set the reactivation timer */
if (!(lev->flag[p] & HCBS_APERIODIC))
{
386,7 → 383,7
the deadline may be != from actual_time + period
(if we call the task_activate after a task_sleep, and the
deadline was postponed a lot...) */
TIMESPEC_ASSIGN(&lev->reactivation_time[p], &t);
TIMESPEC_ASSIGN(&lev->reactivation_time[p], t);
ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
// TIMESPEC_ASSIGN(&lev->reactivation_time[p], &lev->cbs_dline[p]);
lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
/shark/trunk/kernel/modules/ds.c
20,11 → 20,11
 
/**
------------
CVS : $Id: ds.c,v 1.7 2004-03-10 14:51:43 giacomo Exp $
CVS : $Id: ds.c,v 1.8 2004-05-17 15:03:51 anton Exp $
 
File: $File$
Revision: $Revision: 1.7 $
Last update: $Date: 2004-03-10 14:51:43 $
Revision: $Revision: 1.8 $
Last update: $Date: 2004-05-17 15:03:51 $
------------
 
This file contains the aperiodic server DS (Deferrable Server)
278,7 → 278,7
}
}
 
static void DS_public_activate(LEVEL l, PID p)
static void DS_public_activate(LEVEL l, PID p, struct timespec *t)
{
DS_level_des *lev = (DS_level_des *)(level_table[l]);
 
/shark/trunk/kernel/modules/cbs.c
20,11 → 20,11
 
/**
------------
CVS : $Id: cbs.c,v 1.10 2004-03-10 14:51:43 giacomo Exp $
CVS : $Id: cbs.c,v 1.11 2004-05-17 15:03:51 anton Exp $
 
File: $File$
Revision: $Revision: 1.10 $
Last update: $Date: 2004-03-10 14:51:43 $
Revision: $Revision: 1.11 $
Last update: $Date: 2004-05-17 15:03:51 $
------------
 
This file contains the aperiodic server CBS (Total Bandwidth Server)
404,10 → 404,9
private_epilogue(lev->scheduling_level,p);
}
 
static void CBS_public_activate(LEVEL l, PID p)
static void CBS_public_activate(LEVEL l, PID p, struct timespec *t)
{
CBS_level_des *lev = (CBS_level_des *)(level_table[l]);
struct timespec t;
 
if (lev->flag[p] & CBS_SLEEP) {
lev->flag[p] &= ~CBS_SLEEP;
421,10 → 420,8
return;
}
 
kern_gettime(&t);
CBS_activation(lev, p, t);
 
CBS_activation(lev, p, &t);
 
/* Set the reactivation timer */
if (!(lev->flag[p] & CBS_APERIODIC))
{
432,7 → 429,7
the deadline may be != from actual_time + period
(if we call the task_activate after a task_sleep, and the
deadline was postponed a lot...) */
TIMESPEC_ASSIGN(&lev->reactivation_time[p], &t);
TIMESPEC_ASSIGN(&lev->reactivation_time[p], t);
ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
// TIMESPEC_ASSIGN(&lev->reactivation_time[p], &lev->cbs_dline[p]);
lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
/shark/trunk/kernel/modules/rm.c
20,24 → 20,17
 
/**
------------
CVS : $Id: rm.c,v 1.8 2004-03-10 14:51:44 giacomo Exp $
CVS : $Id: rm.c,v 1.9 2004-05-17 15:03:52 anton Exp $
 
File: $File$
Revision: $Revision: 1.8 $
Last update: $Date: 2004-03-10 14:51:44 $
Revision: $Revision: 1.9 $
Last update: $Date: 2004-05-17 15:03:52 $
------------
 
This file contains the scheduling module RM (Rate Monotonic)
This file contains the scheduling module RM (rate/deadline monotonic)
 
Read rm.h for further details.
 
This file is equal to EDF.c except for:
 
. EDF changed to RM :-)
. q_timespec_insert changed to q_insert
. proc_table[p].priority is also modified when we modify lev->period[p]
 
 
**/
 
/*
67,91 → 60,174
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
/*+ Status used in the level +*/
#define RM_READY MODULE_STATUS_BASE /*+ - Ready status +*/
#define RM_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/
#define RM_WAIT MODULE_STATUS_BASE+3 /*+ to wait the deadline +*/
#define RM_IDLE MODULE_STATUS_BASE+4 /*+ to wait the deadline +*/
#define RM_ZOMBIE MODULE_STATUS_BASE+5 /*+ to wait the free time +*/
//#define RM_DEBUG
#define rm_printf kern_printf
 
/*+ flags +*/
#define RM_FLAG_SPORADIC 1
#define RM_FLAG_NORAISEEXC 2
#ifdef RM_DEBUG
/* some debug print functions */
char *pnow() {
static char buf[40];
struct timespec t;
sys_gettime(&t);
sprintf(buf, "%ld.%06ld", t.tv_sec, t.tv_nsec/1000);
return buf;
}
char *ptime1(struct timespec *t) {
static char buf[40];
sprintf(buf, "%ld.%06ld", t->tv_sec, t->tv_nsec/1000);
return buf;
}
char *ptime2(struct timespec *t) {
static char buf[40];
sprintf(buf, "%ld.%06ld", t->tv_sec, t->tv_nsec/1000);
return buf;
}
#endif
 
/*+ the level redefinition for the Rate Monotonic +*/
typedef struct {
level_des l; /*+ the standard level descriptor +*/
/* statuses used in the level */
#define RM_READY MODULE_STATUS_BASE /* ready */
#define RM_IDLE MODULE_STATUS_BASE+1 /* idle, waiting for offset/eop */
#define RM_WAIT MODULE_STATUS_BASE+2 /* to sleep, waiting for eop */
#define RM_ZOMBIE MODULE_STATUS_BASE+3 /* zombie, waiting for eop */
 
TIME period[MAX_PROC]; /*+ The task periods; the deadlines are
stored in the priority field +*/
int deadline_timer[MAX_PROC];
/*+ The task deadline timers +*/
/* task flags */
#define RM_FLAG_SPORADIC 1 /* the task is sporadic */
#define RM_FLAG_SPOR_LATE 2 /* sporadic task with period overrun */
 
int flag[MAX_PROC];
/*+ used to manage the JOB_TASK_MODEL and the
periodicity +*/
 
IQUEUE ready; /*+ the ready queue +*/
/* the level redefinition for the Earliest Deadline First level */
typedef struct {
level_des l; /* standard level descriptor */
IQUEUE ready; /* the ready queue */
int flags; /* level flags */
bandwidth_t U; /* used bandwidth */
 
int flags; /*+ the init flags... +*/
int taskflags[MAX_PROC]; /* task flags */
TIME period[MAX_PROC]; /* task period */
TIME rdeadline[MAX_PROC]; /* task relative deadlines */
TIME offset[MAX_PROC]; /* task release offsets */
struct timespec release[MAX_PROC]; /* release time of the task */
int dl_timer[MAX_PROC]; /* deadline overrun timer */
int eop_timer[MAX_PROC]; /* end of period timer */
int dl_miss[MAX_PROC]; /* deadline miss counter */
int wcet_miss[MAX_PROC]; /* WCET miss counter */
int nact[MAX_PROC]; /* number of pending periodic jobs */
int nskip[MAX_PROC]; /* number of skipped sporadic jobs */
} RM_level_des;
 
bandwidth_t U; /*+ the used bandwidth +*/
 
} RM_level_des;
static void RM_timer_endperiod(void *par);
 
 
/* This function is called when a task misses its deadline */
 
static void RM_timer_deadline(void *par)
{
PID p = (PID) par;
RM_level_des *lev;
struct timespec *temp;
lev = (RM_level_des *)level_table[proc_table[p].task_level];
TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0);
 
if (lev->flags & RM_ENABLE_DL_EXCEPTION) {
kern_raise(XDEADLINE_MISS,p);
} else {
lev->dl_miss[p]++;
}
}
 
 
/* Release (or queue) task, post deadline and endperiod timers.
The release time is stored in lev->release[p]. */
 
static void RM_intern_release(PID p, RM_level_des *lev)
{
struct timespec temp;
 
/* post deadline timer */
if (lev->flags & RM_ENABLE_DL_CHECK) {
temp = lev->release[p];
ADDUSEC2TIMESPEC(lev->rdeadline[p], &temp);
lev->dl_timer[p] = kern_event_post(&temp,RM_timer_deadline,(void *)p);
}
 
/* release or queue next job */
if (proc_table[p].status == RM_IDLE) {
/* assign deadline, insert task in the ready queue */
proc_table[p].status = RM_READY;
*iq_query_priority(p,&lev->ready) = lev->rdeadline[p];
iq_priority_insert(p,&lev->ready);
#ifdef RM_DEBUG
rm_printf("At %s: releasing %s\n", pnow(), proc_table[p].name);
#endif
/* reschedule */
event_need_reschedule();
} else {
/* queue */
lev->nact[p]++;
}
 
/* increase release time */
ADDUSEC2TIMESPEC(lev->period[p],&lev->release[p]);
/* post end of period timer */
kern_event_post(&lev->release[p],RM_timer_endperiod,(void *)p);
 
TRACER_LOGEVENT(FTrace_EVT_task_timer,(unsigned short int)proc_table[p].context,(unsigned int)proc_table[p].task_level);
}
 
 
/* First release */
 
static void RM_timer_offset(void *par)
{
PID p = (PID) par;
RM_level_des *lev;
lev = (RM_level_des *)level_table[proc_table[p].task_level];
 
switch (proc_table[p].status) {
case RM_ZOMBIE:
/* we finally put the task in the ready queue */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* and free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet;
break;
RM_intern_release(p, lev);
}
 
case RM_IDLE:
/* tracer stuff */
TRACER_LOGEVENT(FTrace_EVT_task_timer,(unsigned short int)proc_table[p].context,(unsigned int)proc_table[p].task_level);
/* similar to RM_task_activate */
temp = iq_query_timespec(p, &lev->ready);
ADDUSEC2TIMESPEC(lev->period[p], temp);
proc_table[p].status = RM_READY;
iq_priority_insert(p,&lev->ready);
lev->deadline_timer[p] = kern_event_post(temp,
RM_timer_deadline,
(void *)p);
//printk("(d%d idle priority set to %d)",p,proc_table[p].priority );
event_need_reschedule();
break;
 
case RM_WAIT:
/* Without this, the task cannot be reactivated!!! */
proc_table[p].status = SLEEP;
break;
/* This function is called at the end of the period */
 
default:
/* else, a deadline miss occurred!!! */
kern_printf("timer_deadline:AAARRRGGGHHH!!!");
kern_raise(XDEADLINE_MISS,p);
static void RM_timer_endperiod(void *par)
{
PID p = (PID) par;
RM_level_des *lev;
lev = (RM_level_des *)level_table[proc_table[p].task_level];
 
if (proc_table[p].status == RM_ZOMBIE) {
/* put the task in the FREE state */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->rdeadline[p]) * proc_table[p].wcet;
return;
}
if (!(lev->taskflags[p] & RM_FLAG_SPORADIC)) {
/* if the task is periodic, rerelease it (now or later) */
RM_intern_release(p, lev);
} else {
/* else check if the task is waiting for end of period */
if (proc_table[p].status == RM_WAIT) {
proc_table[p].status = SLEEP;
} else {
/* the task is still busy. mark it as late */
lev->taskflags[p] |= RM_FLAG_SPOR_LATE;
}
}
}
 
 
/* This function is called when a guest task misses its deadline */
 
static void RM_timer_guest_deadline(void *par)
{
PID p = (PID) par;
 
kern_printf("AAARRRGGGHHH!!!");
TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0);
kern_raise(XDEADLINE_MISS,p);
}
 
159,7 → 235,6
static PID RM_public_scheduler(LEVEL l)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
return iq_query_first(&lev->ready);
}
 
179,7 → 254,6
static int RM_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
HARD_TASK_MODEL *h;
 
if (m->pclass != HARD_PCLASS) return -1;
186,49 → 260,68
if (m->level != 0 && m->level != l) return -1;
h = (HARD_TASK_MODEL *)m;
if (!h->wcet || !h->mit) return -1;
if (h->drel > h->mit) return -1; /* only D <= T supported */
 
/* update the bandwidth... */
if (!h->drel) {
lev->rdeadline[p] = h->mit;
} else {
lev->rdeadline[p] = h->drel;
}
 
/* check the free bandwidth... */
if (lev->flags & RM_ENABLE_GUARANTEE) {
bandwidth_t b;
b = (MAX_BANDWIDTH / h->mit) * h->wcet;
b = (MAX_BANDWIDTH / lev->rdeadline[p]) * h->wcet;
 
/* really update lev->U, checking an overflow... */
if (MAX_BANDWIDTH - lev->U > b)
if (MAX_BANDWIDTH - lev->U > b) {
lev->U += b;
else
} else {
return -1;
}
}
 
/* now we know that m is a valid model */
if (lev->flags & RM_ENABLE_WCET_EXCEPTION) {
lev->flags |= RM_ENABLE_WCET_CHECK;
}
if (lev->flags & RM_ENABLE_DL_EXCEPTION) {
lev->flags |= RM_ENABLE_DL_CHECK;
}
 
*iq_query_priority(p, &lev->ready) = lev->period[p] = h->mit;
lev->period[p] = h->mit;
if (lev->rdeadline[p] == lev->period[p]) {
/* Ensure that D <= T-eps to make dl_timer trigger before rel_timer */
lev->rdeadline[p] = lev->period[p] - 1;
}
lev->taskflags[p] = 0;
 
if (h->periodicity == APERIODIC)
lev->flag[p] = RM_FLAG_SPORADIC;
else
lev->flag[p] = 0;
lev->deadline_timer[p] = -1;
lev->taskflags[p] |= RM_FLAG_SPORADIC;
lev->dl_timer[p] = -1;
lev->eop_timer[p] = -1;
 
/* Enable wcet check */
if (lev->flags & RM_ENABLE_WCET_CHECK) {
proc_table[p].avail_time = h->wcet;
proc_table[p].wcet = h->wcet;
proc_table[p].control |= CONTROL_CAP;
proc_table[p].control |= CONTROL_CAP; /* turn on measurement */
}
 
lev->offset[p] = h->offset;
 
NULL_TIMESPEC(&lev->release[p]);
 
return 0; /* OK, also if the task cannot be guaranteed... */
}
 
static void RM_public_detach(LEVEL l, PID p)
{
/* the RM level doesn't introduce any dynamically allocated new field.
we only have to reset the NO_GUARANTEE FIELD and decrement the allocated
bandwidth */
 
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
if (lev->flags & RM_ENABLE_GUARANTEE) {
lev->U -= (MAX_BANDWIDTH / lev->period[p]) * proc_table[p].wcet;
lev->U -= (MAX_BANDWIDTH / lev->rdeadline[p]) * proc_table[p].wcet;
}
}
 
235,12 → 328,6
static void RM_public_dispatch(LEVEL l, PID p, int nostop)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
// kern_printf("(disp %d)",p);
 
/* the task state is set EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
iq_extract(p, &lev->ready);
}
 
248,51 → 335,63
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
// kern_printf("(epil %d)",p);
 
/* check if the wcet is finished... */
if ((lev->flags & RM_ENABLE_WCET_CHECK) && proc_table[p].avail_time <= 0) {
/* if it is, raise a XWCET_VIOLATION exception */
kern_raise(XWCET_VIOLATION,p);
proc_table[p].status = RM_WCET_VIOLATED;
if (lev->flags & RM_ENABLE_WCET_CHECK) {
if (proc_table[p].avail_time <= 0) {
TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,(unsigned short int)proc_table[p].context,0);
if (lev->flags & RM_ENABLE_WCET_EXCEPTION) {
kern_raise(XWCET_VIOLATION,p);
} else {
proc_table[p].control &= ~CONTROL_CAP;
lev->wcet_miss[p]++;
}
}
}
else {
/* the task has been preempted. it returns into the ready queue... */
iq_priority_insert(p,&lev->ready);
proc_table[p].status = RM_READY;
}
 
/* the task returns to the ready queue */
iq_priority_insert(p,&lev->ready);
proc_table[p].status = RM_READY;
 
}
 
static void RM_public_activate(LEVEL l, PID p)
static void RM_public_activate(LEVEL l, PID p, struct timespec *t)
{
struct timespec clocktime;
RM_level_des *lev = (RM_level_des *)(level_table[l]);
struct timespec *temp;
 
if (proc_table[p].status == RM_WAIT) {
kern_raise(XACTIVATION,p);
kern_gettime(&clocktime);
 
/* check if we are not in the SLEEP state */
if (proc_table[p].status != SLEEP) {
if (lev->flags & RM_ENABLE_ACT_EXCEPTION) {
/* too frequent or wrongful activation: raise exception */
kern_raise(XACTIVATION,p);
} else {
/* skip the sporadic job, but increase a counter */
#ifdef RM_DEBUG
rm_printf("At %s: activation of %s skipped\n", pnow(), proc_table[p].name);
#endif
lev->nskip[p]++;
}
return;
}
/* set the release time to the activation time + offset */
lev->release[p] = *t;
ADDUSEC2TIMESPEC(lev->offset[p], &lev->release[p]);
 
/* Test if we are trying to activate a non sleeping task */
/* Ignore this; the task is already active */
if (proc_table[p].status != SLEEP &&
proc_table[p].status != RM_WCET_VIOLATED)
return;
/* Check if release > clocktime. If so, release it later,
otherwise release it now. */
 
proc_table[p].status = RM_IDLE;
 
/* see also RM_timer_deadline */
temp = iq_query_timespec(p, &lev->ready);
kern_gettime(temp);
ADDUSEC2TIMESPEC(lev->period[p], temp);
 
/* Insert task in the correct position */
proc_table[p].status = RM_READY;
iq_priority_insert(p,&lev->ready);
 
/* Set the deadline timer */
lev->deadline_timer[p] = kern_event_post(temp,
RM_timer_deadline,
(void *)p);
if (TIMESPEC_A_GT_B(&lev->release[p], &clocktime)) {
/* release later */
kern_event_post(&lev->release[p],RM_timer_offset,(void *)p);
} else {
/* release now */
RM_intern_release(p, lev);
}
}
 
static void RM_public_unblock(LEVEL l, PID p)
299,9 → 398,6
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
/* Similar to RM_task_activate,
but we don't check in what state the task is */
 
/* Insert task in the correct position */
proc_table[p].status = RM_READY;
iq_priority_insert(p,&lev->ready);
324,31 → 420,105
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
/* the task has terminated his job before it consume the wcet. All OK! */
if (lev->flag[p] & RM_FLAG_SPORADIC)
proc_table[p].status = RM_WAIT;
else /* pclass = sporadic_pclass */
proc_table[p].status = RM_IDLE;
switch((long)(m)) {
/* task_endcycle() */
case 0:
/* if there are no pending jobs */
if (lev->nact[p] == 0) {
/* remove deadline timer, if any */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
}
if (lev->taskflags[p] & RM_FLAG_SPORADIC) {
/* sporadic task */
if (!(lev->taskflags[p] & RM_FLAG_SPOR_LATE)) {
proc_table[p].status = RM_WAIT;
} else {
/* it's late, move it directly to SLEEP */
proc_table[p].status = SLEEP;
lev->taskflags[p] &= ~RM_FLAG_SPOR_LATE;
}
} else {
/* periodic task */
proc_table[p].status = RM_IDLE;
}
} else {
/* we are late / there are pending jobs */
lev->nact[p]--;
*iq_query_priority(p,&lev->ready) = lev->rdeadline[p];
iq_priority_insert(p,&lev->ready);
#ifdef RM_DEBUG
rm_printf("(Late) At %s: releasing %s\n",
pnow(), proc_table[p].name);
#endif
}
break;
/* task_sleep() */
case 1:
/* remove deadline timer, if any */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
}
if (lev->taskflags[p] & RM_FLAG_SPORADIC) {
/* sporadic task */
if (!(lev->taskflags[p] & RM_FLAG_SPOR_LATE)) {
proc_table[p].status = RM_WAIT;
} else {
/* it's late, move it directly to SLEEP */
proc_table[p].status = SLEEP;
lev->taskflags[p] &= ~RM_FLAG_SPOR_LATE;
}
} else {
/* periodic task */
if (!(lev->nact[p] > 0)) {
/* we are on time. go to the RM_WAIT state */
proc_table[p].status = RM_WAIT;
} else {
/* we are late. delete pending activations and go to SLEEP */
lev->nact[p] = 0;
proc_table[p].status = SLEEP;
/* remove end of period timer */
if (lev->eop_timer[p] != -1) {
kern_event_delete(lev->eop_timer[p]);
lev->eop_timer[p] = -1;
}
}
}
break;
}
 
/* we reset the capacity counters... */
if (lev->flags & RM_ENABLE_WCET_CHECK)
proc_table[p].avail_time = proc_table[p].wcet;
 
if (lev->flags & RM_ENABLE_WCET_CHECK) {
proc_table[p].control |= CONTROL_CAP;
}
proc_table[p].avail_time = proc_table[p].wcet;
jet_update_endcycle(); /* Update the Jet data... */
TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);
/* when the deadline timer fire, it recognize the situation and set
correctly all the stuffs (like reactivation, sleep, etc... ) */
TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);
 
return 0;
 
}
 
static void RM_public_end(LEVEL l, PID p)
{
proc_table[p].status = RM_ZOMBIE;
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
/* When the deadline timer fire, it put the task descriptor in
the free queue, and free the allocated bandwidth... */
if (!(lev->taskflags[p] & RM_FLAG_SPOR_LATE)) {
/* remove the deadline timer (if any) */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
}
proc_table[p].status = RM_ZOMBIE;
} else {
/* no endperiod timer will be fired, free the task now! */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->rdeadline[p]) * proc_table[p].wcet;
}
}
 
static void RM_private_insert(LEVEL l, PID p, TASK_MODEL *m)
363,22 → 533,21
 
job = (JOB_TASK_MODEL *)m;
 
*iq_query_timespec(p,&lev->ready) = job->deadline;
/* Insert task in the correct position */
*iq_query_timespec(p, &lev->ready) = job->deadline;
/* THIS IS QUESTIONABLE!! rel deadline? */
*iq_query_priority(p, &lev->ready) = lev->period[p] = job->period;
lev->deadline_timer[p] = -1;
 
/* Insert task in the correct position */
iq_priority_insert(p,&lev->ready);
proc_table[p].status = RM_READY;
lev->dl_timer[p] = -1;
 
if (job->noraiseexc)
lev->flag[p] = RM_FLAG_NORAISEEXC;
else {
lev->flag[p] = 0;
lev->deadline_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
RM_timer_guest_deadline,
(void *)p);
lev->period[p] = job->period;
 
if (!job->noraiseexc) {
lev->dl_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
RM_timer_guest_deadline,(void *)p);
}
}
 
405,26 → 574,21
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
//kern_printf("RM_guest_end: dline timer %d\n",lev->deadline_timer[p]);
if (proc_table[p].status == RM_READY)
{
iq_extract(p, &lev->ready);
//kern_printf("(g_end rdy extr)");
}
 
/* we remove the deadline timer, because the slice is finished */
if (lev->deadline_timer[p] != NIL) {
// kern_printf("RM_guest_end: dline timer %d\n",lev->deadline_timer[p]);
kern_event_delete(lev->deadline_timer[p]);
lev->deadline_timer[p] = NIL;
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
}
 
}
 
/* Registration functions */
 
/*+ Registration function:
int flags the init flags ... see rm.h +*/
 
/* Registration function:
int flags the init flags ... see rm.h */
LEVEL RM_register_level(int flags)
{
LEVEL l; /* the level that we register */
462,9 → 626,13
 
/* fill the RM descriptor part */
for(i=0; i<MAX_PROC; i++) {
lev->period[i] = 0;
lev->deadline_timer[i] = -1;
lev->flag[i] = 0;
lev->period[i] = 0;
lev->dl_timer[i] = -1;
lev->taskflags[i] = 0;
lev->dl_miss[i] = 0;
lev->wcet_miss[i] = 0;
lev->nact[i] = 0;
lev->nskip[i] = 0;
}
 
iq_init(&lev->ready, &freedesc, 0);
481,3 → 649,35
return lev->U;
}
 
int RM_get_nact(PID p)
{
LEVEL l = proc_table[p].task_level;
RM_level_des *lev = (RM_level_des *)(level_table[l]);
return lev->nact[p];
}
 
int RM_get_dl_miss(PID p)
{
LEVEL l = proc_table[p].task_level;
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
return lev->dl_miss[p];
}
 
int RM_get_wcet_miss(PID p)
{
LEVEL l = proc_table[p].task_level;
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
return lev->wcet_miss[p];
}
 
int RM_get_nskip(PID p)
{
LEVEL l = proc_table[p].task_level;
RM_level_des *lev = (RM_level_des *)(level_table[l]);
return lev->nskip[p];
}
 
/shark/trunk/kernel/modules/rrsoft.c
20,11 → 20,11
 
/**
------------
CVS : $Id: rrsoft.c,v 1.8 2004-03-10 14:51:44 giacomo Exp $
CVS : $Id: rrsoft.c,v 1.9 2004-05-17 15:03:52 anton Exp $
 
File: $File$
Revision: $Revision: 1.8 $
Last update: $Date: 2004-03-10 14:51:44 $
Revision: $Revision: 1.9 $
Last update: $Date: 2004-05-17 15:03:52 $
------------
 
This file contains the scheduling module RRSOFT (Round Robin)
257,7 → 257,7
proc_table[p].status = RRSOFT_READY;
}
 
static void RRSOFT_public_activate(LEVEL l, PID p)
static void RRSOFT_public_activate(LEVEL l, PID p, struct timespec *t)
{
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);
 
372,7 → 372,7
if (p == NIL)
printk("\nPanic!!! can't create main task...\n");
 
RRSOFT_public_activate(lev,p);
RRSOFT_public_activate(lev,p,NULL);
}
 
 
/shark/trunk/kernel/modules/ps.c
20,11 → 20,11
 
/**
------------
CVS : $Id: ps.c,v 1.7 2004-03-10 14:51:44 giacomo Exp $
CVS : $Id: ps.c,v 1.8 2004-05-17 15:03:52 anton Exp $
 
File: $File$
Revision: $Revision: 1.7 $
Last update: $Date: 2004-03-10 14:51:44 $
Revision: $Revision: 1.8 $
Last update: $Date: 2004-05-17 15:03:52 $
------------
 
This file contains the aperiodic server PS (Polling Server)
318,7 → 318,7
}
}
 
static void PS_public_activate(LEVEL l, PID p)
static void PS_public_activate(LEVEL l, PID p, struct timespec *t)
{
PS_level_des *lev = (PS_level_des *)(level_table[l]);
 
/shark/trunk/kernel/modules/rr.c
20,11 → 20,11
 
/**
------------
CVS : $Id: rr.c,v 1.8 2004-03-10 14:51:44 giacomo Exp $
CVS : $Id: rr.c,v 1.9 2004-05-17 15:03:52 anton Exp $
 
File: $File$
Revision: $Revision: 1.8 $
Last update: $Date: 2004-03-10 14:51:44 $
Revision: $Revision: 1.9 $
Last update: $Date: 2004-05-17 15:03:52 $
------------
 
This file contains the scheduling module RR (Round Robin)
191,7 → 191,7
#endif
}
 
static void RR_public_activate(LEVEL l, PID p)
static void RR_public_activate(LEVEL l, PID p, struct timespec *t)
{
RR_level_des *lev = (RR_level_des *)(level_table[l]);
 
294,7 → 294,7
if (p == NIL)
printk(KERN_EMERG "Panic!!! can't create main task... errno =%d\n",errno);
 
RR_public_activate(lev,p);
RR_public_activate(lev,p,NULL);
 
#ifdef RRDEBUG
rr_printf("(main created %d)",p);
/shark/trunk/kernel/modules/intdrive.c
183,7 → 183,7
}
 
static void INTDRIVE_public_activate(LEVEL l, PID p)
static void INTDRIVE_public_activate(LEVEL l, PID p, struct timespec *t)
{
 
INTDRIVE_level_des *lev = (INTDRIVE_level_des *)(level_table[l]);
/shark/trunk/kernel/modules/ss.c
20,11 → 20,11
 
/**
------------
CVS : $Id: ss.c,v 1.7 2004-03-10 14:51:45 giacomo Exp $
CVS : $Id: ss.c,v 1.8 2004-05-17 15:03:53 anton Exp $
 
File: $File$
Revision: $Revision: 1.7 $
Last update: $Date: 2004-03-10 14:51:45 $
Revision: $Revision: 1.8 $
Last update: $Date: 2004-05-17 15:03:53 $
------------
 
This file contains the aperiodic Sporadic Server (SS).
751,10 → 751,9
}
}
 
static void SS_public_activate(LEVEL l, PID p)
static void SS_public_activate(LEVEL l, PID p, struct timespec *t)
{
SS_level_des *lev = (SS_level_des *)(level_table[l]);
struct timespec ty;
 
#ifdef DEBUG
kern_printf("SS_tacti ");
770,13 → 769,12
if (lev->server_active == SS_SERVER_NOTACTIVE) {
lev->server_active = SS_SERVER_ACTIVE;
/* set replenish time */
kern_gettime(&ty);
ADDUSEC2TIMESPEC(lev->period, &ty);
TIMESPEC_ASSIGN(&lev->lastdline, &ty);
ADDUSEC2TIMESPEC(lev->period, t);
TIMESPEC_ASSIGN(&lev->lastdline, t);
#ifdef DEBUG
kern_printf("RT=%d.%d ",ty.tv_sec,ty.tv_nsec);
kern_printf("RT=%d.%d ",t->tv_sec,t->tv_nsec);
#endif
kern_event_post(&ty, SS_replenish_timer, (void *) l);
kern_event_post(t, SS_replenish_timer, (void *) l);
}
}
lev->activated = p;
/shark/trunk/kernel/modules/tbs.c
20,11 → 20,11
 
/**
------------
CVS : $Id: tbs.c,v 1.7 2004-03-10 14:51:45 giacomo Exp $
CVS : $Id: tbs.c,v 1.8 2004-05-17 15:03:53 anton Exp $
 
File: $File$
Revision: $Revision: 1.7 $
Last update: $Date: 2004-03-10 14:51:45 $
Revision: $Revision: 1.8 $
Last update: $Date: 2004-05-17 15:03:53 $
------------
 
This file contains the aperiodic server TBS (Total Bandwidth Server)
252,17 → 252,15
private_epilogue(lev->scheduling_level,p);
}
 
static void TBS_public_activate(LEVEL l, PID p)
static void TBS_public_activate(LEVEL l, PID p, struct timespec *t)
{
TBS_level_des *lev = (TBS_level_des *)(level_table[l]);
struct timespec t;
 
if (proc_table[p].status == SLEEP ||
proc_table[p].status == TBS_WCET_VIOLATED) {
 
kern_gettime(&t);
if (TIMESPEC_A_GT_B(&t, &lev->lastdline))
TIMESPEC_ASSIGN(&lev->lastdline, &t );
if (TIMESPEC_A_GT_B(t, &lev->lastdline))
TIMESPEC_ASSIGN(&lev->lastdline, t );
 
 
if (lev->activated == NIL) {
/shark/trunk/include/kernel/descr.h
21,11 → 21,11
 
/**
------------
CVS : $Id: descr.h,v 1.4 2003-03-13 13:36:27 pj Exp $
CVS : $Id: descr.h,v 1.5 2004-05-17 15:03:50 anton Exp $
 
File: $File$
Revision: $Revision: 1.4 $
Last update: $Date: 2003-03-13 13:36:27 $
Revision: $Revision: 1.5 $
Last update: $Date: 2004-05-17 15:03:50 $
------------
 
Kernel main data structures
255,7 → 255,7
void (*public_epilogue )(LEVEL l, PID p);
A task has been preempted (or its capacity is exhausted).
 
void (*public_activate )(LEVEL l, PID p);
void (*public_activate )(LEVEL l, PID p, struct timespec *t);
A task has been activated.
 
void (*public_unblock )(LEVEL l, PID p);
291,7 → 291,7
int (*public_eligible )(LEVEL l, PID p);
void (*public_dispatch )(LEVEL l, PID p, int nostop);
void (*public_epilogue )(LEVEL l, PID p);
void (*public_activate )(LEVEL l, PID p);
void (*public_activate )(LEVEL l, PID p, struct timespec *t);
void (*public_unblock )(LEVEL l, PID p);
void (*public_block )(LEVEL l, PID p);
int (*public_message )(LEVEL l, PID p, void *m);
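
The prototype change above is the core of this revision: task_activate() and group_activate() now sample the clock once with kern_gettime() and pass the timestamp down to the modules, so every task released by a single group_activate() call sees the same activation time. A minimal module-side hook using the new signature might look like the sketch below; DUMMY_level_des, DUMMY_READY and the ready field are hypothetical placeholders, not identifiers from the shark sources.

static void DUMMY_public_activate(LEVEL l, PID p, struct timespec *t)
{
  DUMMY_level_des *lev = (DUMMY_level_des *)(level_table[l]);

  /* ignore activations of tasks that are not sleeping */
  if (proc_table[p].status != SLEEP)
    return;

  /* use the time sampled by task_activate() instead of reading the clock,
     e.g. as the ordering key in the module's ready queue */
  *iq_query_timespec(p, &lev->ready) = *t;
  proc_table[p].status = DUMMY_READY;
  iq_timespec_insert(p, &lev->ready);
}
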
/shark/trunk/include/kernel/model.h
21,11 → 21,11
 
/**
------------
CVS : $Id: model.h,v 1.5 2004-03-19 12:37:41 giacomo Exp $
CVS : $Id: model.h,v 1.6 2004-05-17 15:03:50 anton Exp $
 
File: $File$
Revision: $Revision: 1.5 $
Last update: $Date: 2004-03-19 12:37:41 $
Revision: $Revision: 1.6 $
Last update: $Date: 2004-05-17 15:03:50 $
------------
 
This file contains the definitions of the task and resource models.
289,7 → 289,8
 
/* A Hard Task model can be used to model periodic and sporadic tasks.
These tasks are usually guaranteed based on their minimum interarrival
time (mit) and wcet, and may have a relative deadline.
time (mit) and wcet, and may have a relative deadline and a release
offset.
 
A hard task can raise these exceptions:
XDEADLINE_MISS XWCET_VIOLATION XACTIVATION
304,6 → 305,7
TIME drel;
TIME wcet;
int periodicity;
TIME offset;
} HARD_TASK_MODEL;
 
#define hard_task_default_model(m) \
311,7 → 313,8
(m).mit = 0, \
(m).drel = 0, \
(m).wcet = 0, \
(m).periodicity = PERIODIC
(m).periodicity = PERIODIC, \
(m).offset = 0
#define hard_task_def_level(m,l) task_def_level((m).t,l)
#define hard_task_def_arg(m,a) task_def_arg((m).t,a)
#define hard_task_def_stack(m,s) task_def_stack((m).t,s)
324,6 → 327,7
#define hard_task_def_mit(m,p) (m).mit = (p)
#define hard_task_def_drel(m,d) (m).drel = (d)
#define hard_task_def_wcet(m,w) (m).wcet = (w)
#define hard_task_def_offset(m,o) (m).offset = (o)
#define hard_task_def_periodic(m) (m).periodicity = PERIODIC
#define hard_task_def_aperiodic(m) (m).periodicity = APERIODIC
#define hard_task_def_interrupt(m) (m).periodicity = INTDRIVE
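
As a usage sketch (not part of this changeset), a periodic hard task with a relative deadline shorter than its period and a non-zero release offset can now be described as follows. The numeric values are arbitrary examples; like the other HARD_TASK_MODEL timing fields, they are in microseconds (the modules in this diff convert them with ADDUSEC2TIMESPEC).

HARD_TASK_MODEL m;

hard_task_default_model(m);
hard_task_def_mit(m, 10000);    /* period / minimum interarrival time    */
hard_task_def_drel(m, 8000);    /* relative deadline, must be <= mit     */
hard_task_def_wcet(m, 2000);    /* declared worst-case execution time    */
hard_task_def_offset(m, 5000);  /* new field: delay the first release    */
hard_task_def_periodic(m);
/* m is then passed to the usual task creation call and, from there,
   to the public_create hook of the scheduling module (e.g. EDF or RM). */
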
/shark/trunk/include/modules/edf.h
21,11 → 21,11
 
/**
------------
CVS : $Id: edf.h,v 1.4 2003-05-05 07:31:12 pj Exp $
CVS : $Id: edf.h,v 1.5 2004-05-17 15:03:50 anton Exp $
 
File: $File$
Revision: $Revision: 1.4 $
Last update: $Date: 2003-05-05 07:31:12 $
Revision: $Revision: 1.5 $
Last update: $Date: 2004-05-17 15:03:50 $
------------
 
This file contains the scheduling module EDF (Earliest Deadline First)
38,7 → 38,9
wcet field and mit field must be != 0. They are used to set the wcet
and period of the tasks.
periodicity field can be either PERIODIC or APERIODIC
drel field is ignored
drel field must be <= mit. NOTE: a drel of 0 is interpreted as mit.
offset field specifies a release offset relative to task_activate or
group_activate.
 
Guest Models Accepted:
JOB_TASK_MODEL - a single guest task activation
46,15 → 48,15
period field is ignored
 
Description:
This module schedule his tasks following the classic EDF scheme.
The task guarantee is based on the factor utilization approach.
The tasks scheduled are periodic and sporadic. The sporadic tasks
are like hard task with periodicity set to APERIODIC; they are guaranteed
as a periodic task with period equal to the minimum interarrival time.
All the task are put in a queue and the scheduling is based on the
deadline value.
NO GUARANTEE is performed on guest tasks. The guarantee must be performed
by the level that inserts guest tasks in the EDF level.
This module schedules periodic and sporadic tasks based on their
absolute deadlines. The task guarantee is based on a simple
utilization approach. The utilization factor of a task is computed
as wcet/drel. (By default, drel = mit.) A periodic task must only
be activated once; subsequent activations are triggered by an
internal timer. By contrast, a sporadic task must be explicitly
activated for each instance. NO GUARANTEE is performed on guest
tasks. The guarantee must be performed by the level that inserts
guest tasks in the EDF level.
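For example (illustrative numbers), a task with wcet = 2 ms and
drel = 5 ms is accounted with a utilization of 2/5 = 0.4, while with
the default drel = mit = 10 ms it would count as only 2/10 = 0.2.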
 
Exceptions raised:
XUNVALID_GUEST
61,39 → 63,58
This level doesn't support guests. When a guest operation
is called, the exception is raised.
 
These exceptions are pclass-dependent...
The following exceptions may be raised by the module:
XDEADLINE_MISS
If a task miss his deadline, the exception is raised.
If a task misses its deadline and the EDF_ENABLE_DL_EXCEPTION
flag is set, this exception is raised.
 
XWCET_VIOLATION
If a task doesn't end the current cycle before if consume the wcet,
an exception is raised, and the task is put in the EDF_WCET_VIOLATED
state. To reactivate it, use EDF_task_activate via task_activate or
manage directly the EDF data structure. Note that the exception is not
handled properly, an XDEADLINE_MISS exeeption will also be raised at
the period end...
XWCET_VIOLATION
If a task executes longer than its declared wcet and the
EDF_ENABLE_WCET_EXCEPTION flag is set, this exception is raised.
 
XACTIVATION
If a sporadic task is activated with a rate that is greather than the
rate declared in the model, this exception is raised and the task is NOT
activated.
This exception is also raised if we are trying to activate a periodic task
stopped with task_sleep before the deadline in which the task_sleep is
called.
If a sporadic task is activated more often than its declared mit
and the EDF_ENABLE_ACT_EXCEPTION flag is set, this exception is
raised. This exception is also raised if a periodic task is
activated while not in the SLEEP state.
 
Restrictions & special features:
 
- Relative deadlines drel <= mit may be specified.
- An offset > 0 delays the activation of the task by that amount
of time. To synchronize a group of tasks, use the
group_activate function.
- This level doesn't manage the main task.
- At init time we can choose if the level have to activate
. the wcet check
(If a task require more time than declared, it is stopped and put in
the state EDF_WCET_VIOLATED; a XWCET_VIOLATION exception is raised)
. the task guarantee algorithm
(when all task are created the system will check that the task_set
will not use more than the available bandwidth)
- The level use the priority and timespec_priority fields.
- The level uses the priority and timespec_priority fields.
- A function to return the used bandwidth of a level is provided.
- The guest tasks don't provide the guest_endcycle function
- At init time, the user can specify the behavior in case of
deadline and wcet overruns. The following flags are available:
 
(No flags enabled) - Deadline and wcet overruns are ignored.
Pending periodic jobs are queued and are
eventually scheduled with correct deadlines
according to their original arrival times.
Sporadic tasks that arrive too often are
simply dropped.
EDF_ENABLE_DL_CHECK - When a deadline overrun occurs, the
dl_miss counter of the task is increased.
Same behavior for pending jobs as above.
EDF_ENABLE_WCET_CHECK - When a wcet overrun occurs, the
wcet_miss counter of the task is increased.
Same behavior for pending jobs as above.
EDF_ENABLE_DL_EXCEPTION - When a deadline overrun occurs, an
exception is raised.
EDF_ENABLE_WCET_EXCEPTION - When a wcet overrun occurs, an
exception is raised.
EDF_ENABLE_ACT_EXCEPTION - When a periodic or sporadic task is
activated too often, an exception is raised.
 
- The functions EDF_get_dl_miss, EDF_get_wcet_miss, EDF_get_nact,
and EDF_get_nskip can be used to find out the number of missed
deadlines, number of wcet overruns, number of currently queued
periodic activations, and the number of skipped sporadic activations.
 
**/
 
/*
129,21 → 150,26
 
 
/*+ flags... +*/
#define EDF_DISABLE_ALL 0
#define EDF_ENABLE_WCET_CHECK 1 /*+ Wcet check enabled +*/
#define EDF_ENABLE_GUARANTEE 2 /*+ Task Guarantee enabled +*/
#define EDF_ENABLE_ALL 3 /*+ All flags enabled +*/
#define EDF_DISABLE_ALL 0
#define EDF_ENABLE_GUARANTEE 1 /*+ Task guarantee enabled +*/
#define EDF_ENABLE_WCET_CHECK 2 /*+ Wcet monitoring enabled +*/
#define EDF_ENABLE_DL_CHECK 4 /*+ Deadline monitoring enabled +*/
#define EDF_ENABLE_WCET_EXCEPTION 8 /*+ Wcet overrun exception enabled +*/
#define EDF_ENABLE_DL_EXCEPTION 16 /*+ Deadline overrun exception enabled +*/
#define EDF_ENABLE_ACT_EXCEPTION 32 /*+ Activation exception enabled +*/
#define EDF_ENABLE_ALL 63 /*+ All flags enabled +*/
 
 
/*+ Registration function:
int flag Options to be used in this level instance...
/*+ Registration function +*/
LEVEL EDF_register_level(int flags);
 
returns the level number at which the module has been registered.
+*/
LEVEL EDF_register_level(int flag);
 
/*+ Returns the used bandwidth of a level +*/
bandwidth_t EDF_usedbandwidth(LEVEL l);
 
int EDF_get_dl_miss(PID p);
int EDF_get_wcet_miss(PID p);
int EDF_get_nact(PID p);
int EDF_get_nskip(PID p);
 
__END_DECLS
#endif
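A minimal usage sketch of the new flag set and counters (illustrative,
not part of this revision; the init hook name and the include path are
assumptions based on the repository layout):

    #include <modules/edf.h>

    static LEVEL edf_level;

    void register_my_levels(void)   /* hypothetical registration hook */
    {
      /* guarantee + wcet monitoring + activation exception: 1|2|32 */
      edf_level = EDF_register_level(EDF_ENABLE_GUARANTEE |
                                     EDF_ENABLE_WCET_CHECK |
                                     EDF_ENABLE_ACT_EXCEPTION);
    }

    /* later, for some task p created on this level:
       int misses  = EDF_get_dl_miss(p);
       int pending = EDF_get_nact(p);   */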
/shark/trunk/include/modules/rm.h
21,17 → 21,17
 
/**
------------
CVS : $Id: rm.h,v 1.4 2003-05-05 07:31:12 pj Exp $
CVS : $Id: rm.h,v 1.5 2004-05-17 15:03:51 anton Exp $
 
File: $File$
Revision: $Revision: 1.4 $
Last update: $Date: 2003-05-05 07:31:12 $
Revision: $Revision: 1.5 $
Last update: $Date: 2004-05-17 15:03:51 $
------------
 
This file contains the scheduling module RM (Rate Monotonic)
This file contains the scheduling module RM (rate/deadline monotonic)
 
Title:
RM (Rate Monotonic
RM (Rate/Deadline Monotonic)
 
Task Models Accepted:
HARD_TASK_MODEL - Hard Tasks (Periodic and Sporadic)
38,61 → 38,84
wcet field and mit field must be != 0. They are used to set the wcet
and period of the tasks.
periodicity field can be either PERIODIC or APERIODIC
drel field is ignored
drel field must be <= mit. NOTE: a drel of 0 is interpreted as mit.
offset field specifies a release offset relative to task_activate or
group_activate.
 
Guest Models Accepted:
JOB_TASK_MODEL - a single guest task activation
Identified by an absolute deadline and a period.
all fields are used
period field is ignored
 
Description:
This module schedule his tasks following the classic RM scheme.
The task guarantee is based on the factor utilization approach.
The tasks scheduled are periodic and sporadic. The sporadic tasks
are like hard task with periodicity set to APERIODIC; they are guaranteed
as a periodic task with period equal to the minimum interarrival time.
All the task are put in a queue and the scheduling is based on the
deadline value.
NO GUARANTEE is performed on guest tasks. The guarantee must be performed
by the level that inserts guest tasks in the EDF level.
 
This module schedules periodic and sporadic tasks based on fixed
priorities according to their relative deadlines. (By default, drel
= mit.) The task guarantee is based on a simple utilization
approach. The utilization factor of a task is computed as
wcet/drel. A periodic task needs to be activated only once; subsequent
activations are triggered by an internal timer. By contrast, a
sporadic task must be explicitly activated for each instance. NO
GUARANTEE is performed on guest tasks. The guarantee must be
performed by the level that inserts guest tasks in the RM level.
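For example (illustrative numbers), given two tasks with drel = 5 ms
and drel = 20 ms, the former is statically assigned the higher
priority, regardless of which job currently has the earlier absolute
deadline.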
 
Exceptions raised:
XUNVALID_GUEST
This level doesn't support guests. When a guest operation
is called, the exception is raised.
 
These exceptions are pclass-dependent...
The following exceptions may be raised by the module:
XDEADLINE_MISS
If a task miss his deadline, the exception is raised.
If a task misses its deadline and the RM_ENABLE_DL_EXCEPTION
flag is set, this exception is raised.
 
XWCET_VIOLATION
If a task doesn't end the current cycle before if consume the wcet,
an exception is raised, and the task is put in the RM_WCET_VIOLATED
state. To reactivate it, use RM_task_activate via task_activate or
manage directly the RM data structure. Note that the exception is not
handled properly, an XDEADLINE_MISS exeception will also be raised at
the period end...
XWCET_VIOLATION
If a task executes longer than its declared wcet and the
RM_ENABLE_WCET_EXCEPTION flag is set, this exception is raised.
 
XACTIVATION
If a sporadic task is activated with a rate that is greather than the
rate declared in the model, this exception is raised and the task is NOT
activated.
This exception is also raised if we are trying to activate a periodic task
stopped with task_sleep before the deadline in which the task_sleep is
called.
If a sporadic task is activated more often than its declared mit
and the RM_ENABLE_ACT_EXCEPTION flag is set, this exception is
raised. This exception is also raised if a periodic task is
activated while not in the SLEEP state.
 
Restrictions & special features:
 
- Relative deadlines drel <= mit may be specified.
- An offset > 0 delays the activation of the task by that amount
of time. To synchronize a group of tasks, use the
group_activate function.
- This level doesn't manage the main task.
- At init time we can choose if the level have to activate
. the wcet check
(If a task require more time than declared, it is stopped and put in
the state RM_WCET_VIOLATED; a XWCET_VIOLATION exception is raised)
. the task guarantee algorithm
(when all task are created the system will check that the task_set
will not use more than the available bandwidth)
- The level use the priority field.
- The level uses the priority and timespec_priority fields.
- A function to return the used bandwidth of a level is provided.
- The guest tasks don't provide the guest_endcycle function
- At init time, the user can specify the behavior in case of
deadline and wcet overruns. The following flags are available:
 
(No flags enabled) - Deadline and wcet overruns are ignored.
Pending periodic jobs are queued and are
eventually scheduled with correct deadlines
according to their original arrival times.
Sporadic tasks that arrive too often are
simply dropped.
RM_ENABLE_DL_CHECK - When a deadline overrun occurs, the
dl_miss counter of the task is increased.
Same behavior for pending jobs as above.
RM_ENABLE_WCET_CHECK - When a wcet overrun occurs, the
wcet_miss counter of the task is increased.
Same behavior for pending jobs as above.
RM_ENABLE_DL_EXCEPTION - When a deadline overrun occurs, an
exception is raised.
RM_ENABLE_WCET_EXCEPTION - When a wcet overrun occurs, an
exception is raised.
RM_ENABLE_ACT_EXCEPTION - When a periodic or sporadic task is
activated too often, an exception is raised.
 
- The functions RM_get_dl_miss, RM_get_wcet_miss, RM_get_nact,
and RM_get_nskip can be used to find out the number of missed
deadlines, number of wcet overruns, number of currently queued
periodic activations, and the number of skipped sporadic activations.
 
**/
 
/*
126,22 → 149,20
 
__BEGIN_DECLS
 
/*+ 1 - ln(2) +*/
 
#ifndef RM_MINFREEBANDWIDTH
#define RM_MINFREEBANDWIDTH 1317922825
#endif
/*+ flags... +*/
#define RM_DISABLE_ALL 0
#define RM_ENABLE_GUARANTEE 1 /*+ Task guarantee enabled +*/
#define RM_ENABLE_WCET_CHECK 2 /*+ Wcet monitoring enabled +*/
#define RM_ENABLE_DL_CHECK 4 /*+ Deadline monitoring enabled +*/
#define RM_ENABLE_WCET_EXCEPTION 8 /*+ Wcet overrun exception enabled +*/
#define RM_ENABLE_DL_EXCEPTION 16 /*+ Deadline overrun exception enabled +*/
#define RM_ENABLE_ACT_EXCEPTION 32 /*+ Activation exception enabled +*/
#define RM_ENABLE_ALL 63 /*+ All flags enabled +*/
 
 
 
/*+ flags... +*/
#define RM_DISABLE_ALL 0
#define RM_ENABLE_WCET_CHECK 1 /*+ Wcet check enabled +*/
#define RM_ENABLE_GUARANTEE 2 /*+ Task Guarantee enabled +*/
#define RM_ENABLE_ALL 3 /*+ All flags enabled +*/
 
/*+ Registration function:
int flag Options to be used in this level instance...
int flag Options to be used in this level instance
 
returns the level number at which the module has been registered.
+*/
150,5 → 171,10
/*+ Returns the used bandwidth of a level +*/
bandwidth_t RM_usedbandwidth(LEVEL l);
 
int RM_get_dl_miss(PID p);
int RM_get_wcet_miss(PID p);
int RM_get_nact(PID p);
int RM_get_nskip(PID p);
 
__END_DECLS
#endif
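A parallel sketch for the RM level (illustrative, not part of this
revision; the registration entry point is assumed to be
RM_register_level(int flags), mirroring EDF_register_level, and the
include path follows the repository layout):

    #include <modules/rm.h>

    static LEVEL rm_level;

    void register_rm_level(void)    /* hypothetical registration hook */
    {
      /* count overruns without raising exceptions: 1|2|4 */
      rm_level = RM_register_level(RM_ENABLE_GUARANTEE |
                                   RM_ENABLE_WCET_CHECK |
                                   RM_ENABLE_DL_CHECK);
    }

    /* bandwidth actually reserved by the level, e.g. for diagnostics:
       bandwidth_t used = RM_usedbandwidth(rm_level);                  */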