Subversion Repositories: shark

Compare Revisions

Rev 758 → Rev 759

/shark/trunk/kernel/modules/edf.c
5,11 → 5,11
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Authors:
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
* Anton Cervin
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
20,11 → 20,11
 
/**
------------
CVS : $Id: edf.c,v 1.15 2004-05-26 15:36:23 anton Exp $
CVS : $Id: edf.c,v 1.16 2004-06-21 11:22:05 anton Exp $
 
File: $File$
Revision: $Revision: 1.15 $
Last update: $Date: 2004-05-26 15:36:23 $
Revision: $Revision: 1.16 $
Last update: $Date: 2004-06-21 11:22:05 $
------------
 
This file contains the scheduling module EDF (Earliest Deadline First)
64,7 → 64,6
 
//#define EDF_DEBUG
#define edf_printf kern_printf
 
#ifdef EDF_DEBUG
char *pnow() {
static char buf[40];
85,73 → 84,132
}
#endif
 
/* statuses used in the level */
/* Statuses used in the level */
#define EDF_READY MODULE_STATUS_BASE /* ready */
#define EDF_IDLE MODULE_STATUS_BASE+1 /* idle, waiting for offset/eop */
#define EDF_WAIT MODULE_STATUS_BASE+2 /* to sleep, waiting for eop */
#define EDF_ZOMBIE MODULE_STATUS_BASE+3 /* zombie, waiting for eop */
#define EDF_ZOMBIE MODULE_STATUS_BASE+3 /* to free, waiting for eop */
 
/* task flags */
/* Task flags */
#define EDF_FLAG_SPORADIC 1 /* the task is sporadic */
#define EDF_FLAG_SPOR_LATE 2 /* sporadic task with period overrun */
 
 
/* the level redefinition for the Earliest Deadline First level */
/* Task descriptor */
typedef struct {
level_des l; /* standard level descriptor */
IQUEUE ready; /* the ready queue */
int flags; /* level flags */
bandwidth_t U; /* used bandwidth */
int flags; /* task flags */
TIME period; /* period (or inter-arrival interval) */
TIME rdeadline; /* relative deadline */
TIME offset; /* release offset */
struct timespec release; /* release time of current instance */
struct timespec adeadline; /* latest assigned deadline */
int dl_timer; /* deadline timer */
int eop_timer; /* end of period timer */
int dl_miss; /* deadline miss counter */
int wcet_miss; /* WCET miss counter */
int act_miss; /* activation miss counter */
int nact; /* number of pending periodic jobs */
} EDF_task_des;
 
int taskflags[MAX_PROC]; /* task flags */
TIME period[MAX_PROC]; /* task period */
TIME rdeadline[MAX_PROC]; /* task relative deadlines */
TIME offset[MAX_PROC]; /* task release offsets */
struct timespec release[MAX_PROC]; /* release time of the task */
struct timespec adeadline[MAX_PROC]; /* latest assigned deadline
(needed to correctly assign deadlines to queued activations) */
int dl_timer[MAX_PROC]; /* deadline overrun timer */
int eop_timer[MAX_PROC]; /* end of period timer */
int dl_miss[MAX_PROC]; /* deadline miss counter */
int wcet_miss[MAX_PROC]; /* WCET miss counter */
int nact[MAX_PROC]; /* number of pending periodic jobs */
int nskip[MAX_PROC]; /* number of skipped sporadic jobs */
 
/* Level descriptor */
typedef struct {
level_des l; /* standard level descriptor */
int flags; /* level flags */
IQUEUE ready; /* the ready queue */
bandwidth_t U; /* used bandwidth */
EDF_task_des tvec[MAX_PROC]; /* vector of task descriptors */
} EDF_level_des;
 
 
static void EDF_timer_endperiod(void *par);
/* Module function cross-references */
static void EDF_intern_release(PID p, EDF_level_des *lev);
 
 
/* This function is called when a task misses its deadline */
/**** Timer event handler functions ****/
 
static void EDF_timer_deadline(void *par)
/* This timer event handler is called at the end of the period */
static void EDF_timer_endperiod(void *par)
{
PID p = (PID) par;
EDF_level_des *lev;
lev = (EDF_level_des *)level_table[proc_table[p].task_level];
EDF_level_des *lev = (EDF_level_des *)level_table[proc_table[p].task_level];
EDF_task_des *td = &lev->tvec[p];
TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0);
td->eop_timer = -1;
 
if (proc_table[p].status == EDF_ZOMBIE) {
/* put the task in the FREE state */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/td->rdeadline) * proc_table[p].wcet;
return;
}
 
if (proc_table[p].status == EDF_WAIT) {
proc_table[p].status = SLEEP;
return;
}
if (td->flags & EDF_FLAG_SPORADIC) {
/* the task is sporadic and still busy, mark it as late */
td->flags |= EDF_FLAG_SPOR_LATE;
} else {
/* the task is periodic, release/queue another instance */
EDF_intern_release(p, lev);
}
}
 
/* This timer event handler is called when a task misses its deadline */
static void EDF_timer_deadline(void *par)
{
PID p = (PID) par;
EDF_level_des *lev = (EDF_level_des *)level_table[proc_table[p].task_level];
EDF_task_des *td = &lev->tvec[p];
 
TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,
(unsigned short int)proc_table[p].context,0);
 
if (lev->flags & EDF_ENABLE_DL_EXCEPTION) {
kern_raise(XDEADLINE_MISS,p);
} else {
lev->dl_miss[p]++;
td->dl_miss++;
}
}
 
/* This timer event handler is called after waiting for an offset */
static void EDF_timer_offset(void *par)
{
PID p = (PID) par;
EDF_level_des *lev;
lev = (EDF_level_des *)level_table[proc_table[p].task_level];
/* release the task now */
EDF_intern_release(p, lev);
}
 
/* Release (or queue) task, post deadline and endperiod timers.
The release time is stored in lev->release[p]. */
/* This function is called when a guest task misses its deadline */
static void EDF_timer_guest_deadline(void *par)
{
PID p = (PID) par;
TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,
(unsigned short int)proc_table[p].context,0);
kern_raise(XDEADLINE_MISS,p);
}
 
 
/**** Internal utility functions ****/
 
/* Release (or queue) a task, post deadline and endperiod timers */
static void EDF_intern_release(PID p, EDF_level_des *lev)
{
struct timespec temp;
EDF_task_des *td = &lev->tvec[p];
 
/* post deadline timer */
if (lev->flags & EDF_ENABLE_DL_CHECK) {
temp = lev->release[p];
ADDUSEC2TIMESPEC(lev->rdeadline[p], &temp);
lev->dl_timer[p] = kern_event_post(&temp,EDF_timer_deadline,(void *)p);
temp = td->release;
ADDUSEC2TIMESPEC(td->rdeadline, &temp);
td->dl_timer = kern_event_post(&temp,EDF_timer_deadline,(void *)p);
}
 
/* release or queue next job */
158,87 → 216,35
if (proc_table[p].status == EDF_IDLE) {
/* assign deadline, insert task in the ready queue */
proc_table[p].status = EDF_READY;
*iq_query_timespec(p,&lev->ready) = lev->adeadline[p];
*iq_query_timespec(p,&lev->ready) = td->adeadline;
iq_timespec_insert(p,&lev->ready);
#ifdef EDF_DEBUG
edf_printf("At %s: releasing %s with deadline %s\n", pnow(),
proc_table[p].name, ptime1(&lev->adeadline[p]));
proc_table[p].name, ptime1(&td->adeadline));
#endif
/* increase assigned deadline */
ADDUSEC2TIMESPEC(lev->period[p], &lev->adeadline[p]);
ADDUSEC2TIMESPEC(td->period, &td->adeadline);
/* reschedule */
event_need_reschedule();
} else {
/* queue */
lev->nact[p]++;
td->nact++;
}
 
/* increase release time */
ADDUSEC2TIMESPEC(lev->period[p],&lev->release[p]);
ADDUSEC2TIMESPEC(td->period, &td->release);
/* post end of period timer */
lev->eop_timer[p] = kern_event_post(&lev->release[p],
EDF_timer_endperiod,(void *)p);
td->eop_timer = kern_event_post(&td->release, EDF_timer_endperiod,(void *)p);
 
TRACER_LOGEVENT(FTrace_EVT_task_timer,(unsigned short int)proc_table[p].context,(unsigned int)proc_table[p].task_level);
TRACER_LOGEVENT(FTrace_EVT_task_timer,
(unsigned short int)proc_table[p].context,
(unsigned int)proc_table[p].task_level);
}
 
 
/* Release after an offset */
/**** Public generic kernel interface functions ****/
 
static void EDF_timer_offset(void *par)
{
PID p = (PID) par;
EDF_level_des *lev;
lev = (EDF_level_des *)level_table[proc_table[p].task_level];
 
EDF_intern_release(p, lev);
}
 
 
/* This function is called at the end of the period */
 
static void EDF_timer_endperiod(void *par)
{
PID p = (PID) par;
EDF_level_des *lev;
lev = (EDF_level_des *)level_table[proc_table[p].task_level];
 
lev->eop_timer[p] = -1;
 
if (proc_table[p].status == EDF_ZOMBIE) {
/* put the task in the FREE state */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->rdeadline[p]) * proc_table[p].wcet;
return;
}
 
if (proc_table[p].status == EDF_WAIT) {
proc_table[p].status = SLEEP;
return;
}
if (!(lev->taskflags[p] & EDF_FLAG_SPORADIC)) {
/* if the task is periodic, rerelease it (now or later) */
EDF_intern_release(p, lev);
} else {
/* the sporadic task is still busy. mark it as late */
lev->taskflags[p] |= EDF_FLAG_SPOR_LATE;
}
}
 
 
/* This function is called when a guest task misses its deadline */
 
static void EDF_timer_guest_deadline(void *par)
{
PID p = (PID) par;
TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0);
kern_raise(XDEADLINE_MISS,p);
}
 
/* The scheduler only gets the first task in the queue */
/* Returns the first task in the ready queue */
static PID EDF_public_scheduler(LEVEL l)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
245,7 → 251,7
return iq_query_first(&lev->ready);
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
/* Checks and decreases the available system bandwidth */
static int EDF_public_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
258,9 → 264,11
return 0;
}
 
/* Called by task_create: Checks task model and creates a task */
static int EDF_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
EDF_task_des *td = &lev->tvec[p];
HARD_TASK_MODEL *h;
 
if (m->pclass != HARD_PCLASS) return -1;
270,15 → 278,15
if (h->drel > h->mit) return -1; /* only D <= T supported */
 
if (!h->drel) {
lev->rdeadline[p] = h->mit;
td->rdeadline = h->mit;
} else {
lev->rdeadline[p] = h->drel;
td->rdeadline = h->drel;
}
 
/* check the free bandwidth... */
if (lev->flags & EDF_ENABLE_GUARANTEE) {
bandwidth_t b;
b = (MAX_BANDWIDTH / lev->rdeadline[p]) * h->wcet;
b = (MAX_BANDWIDTH / td->rdeadline) * h->wcet;
 
/* really update lev->U, checking an overflow... */
if (MAX_BANDWIDTH - lev->U > b) {
288,27 → 296,23
}
}
 
if (lev->flags & EDF_ENABLE_WCET_EXCEPTION) {
lev->flags |= EDF_ENABLE_WCET_CHECK;
td->flags = 0;
if (h->periodicity == APERIODIC) {
td->flags |= EDF_FLAG_SPORADIC;
}
if (lev->flags & EDF_ENABLE_DL_EXCEPTION) {
lev->flags |= EDF_ENABLE_DL_CHECK;
td->period = h->mit;
if (td->rdeadline == td->period) {
/* Ensure that D <= T-eps to make dl_timer trigger before eop_timer */
td->rdeadline = td->period - 1;
}
td->offset = h->offset;
td->dl_timer = -1;
td->eop_timer = -1;
td->dl_miss = 0;
td->wcet_miss = 0;
td->act_miss = 0;
td->nact = 0;
 
lev->period[p] = h->mit;
if (lev->rdeadline[p] == lev->period[p]) {
/* Ensure that D <= T-eps to make dl_timer trigger before rel_timer */
lev->rdeadline[p] = lev->period[p] - 1;
}
lev->taskflags[p] = 0;
 
if (h->periodicity == APERIODIC)
lev->taskflags[p] |= EDF_FLAG_SPORADIC;
lev->dl_timer[p] = -1;
lev->eop_timer[p] = -1;
 
/* Enable wcet check */
if (lev->flags & EDF_ENABLE_WCET_CHECK) {
proc_table[p].avail_time = h->wcet;
316,22 → 320,21
proc_table[p].control |= CONTROL_CAP; /* turn on measurement */
}
 
lev->offset[p] = h->offset;
 
NULL_TIMESPEC(&lev->release[p]);
 
return 0; /* OK, also if the task cannot be guaranteed... */
}
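
For context, the model checked above is normally filled in by the application before task_create(); a minimal sketch follows, assuming the standard hard_task_* macros from the S.Ha.R.K. model header. The body name hard_task_body and all numeric parameters are illustrative and not part of this revision.

/* Sketch (not part of this diff): creating a hard periodic task that
   EDF_public_create() above will accept. */
HARD_TASK_MODEL m;
PID p;

hard_task_default_model(m);    /* pclass = HARD_PCLASS, periodic by default */
hard_task_def_mit(m, 50000);   /* period T (usec), illustrative */
hard_task_def_drel(m, 40000);  /* relative deadline D <= T, illustrative */
hard_task_def_wcet(m, 10000);  /* worst-case execution time (usec) */
hard_task_def_ctrl_jet(m);     /* enable execution time accounting */

p = task_create("hard", hard_task_body, &m, NULL);  /* hard_task_body: hypothetical */
if (p == NIL)
  kern_printf("task_create failed\n");
else
  task_activate(p);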
 
/* Reclaim the bandwidth used by the task */
static void EDF_public_detach(LEVEL l, PID p)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
EDF_task_des *td = &lev->tvec[p];
 
if (lev->flags & EDF_ENABLE_GUARANTEE) {
lev->U -= (MAX_BANDWIDTH / lev->rdeadline[p]) * proc_table[p].wcet;
lev->U -= (MAX_BANDWIDTH / td->rdeadline) * proc_table[p].wcet;
}
}
 
/* Extracts the running task from the ready queue */
static void EDF_public_dispatch(LEVEL l, PID p, int nostop)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
338,19 → 341,22
iq_extract(p, &lev->ready);
}
 
/* Called when the task is preempted or when its budget is exhausted */
static void EDF_public_epilogue(LEVEL l, PID p)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
EDF_task_des *td = &lev->tvec[p];
 
/* check if the wcet is finished... */
if (lev->flags & EDF_ENABLE_WCET_CHECK) {
if (proc_table[p].avail_time <= 0) {
TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,(unsigned short int)proc_table[p].context,0);
TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,
(unsigned short int)proc_table[p].context,0);
if (lev->flags & EDF_ENABLE_WCET_EXCEPTION) {
kern_raise(XWCET_VIOLATION,p);
} else {
proc_table[p].control &= ~CONTROL_CAP;
lev->wcet_miss[p]++;
td->wcet_miss++;
}
}
}
361,10 → 367,12
 
}
 
/* Called by task_activate or group_activate: Activates the task at time t */
static void EDF_public_activate(LEVEL l, PID p, struct timespec *t)
{
struct timespec clocktime;
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
EDF_task_des *td = &lev->tvec[p];
 
kern_gettime(&clocktime);
 
376,29 → 384,30
} else {
/* skip the sporadic job, but increase a counter */
#ifdef EDF_DEBUG
edf_printf("At %s: activation of %s skipped\n", pnow(), proc_table[p].name);
edf_printf("At %s: activation of %s skipped\n", pnow(),
proc_table[p].name);
#endif
lev->nskip[p]++;
td->act_miss++;
}
return;
}
/* set the release time to the activation time + offset */
lev->release[p] = *t;
ADDUSEC2TIMESPEC(lev->offset[p], &lev->release[p]);
td->release = *t;
ADDUSEC2TIMESPEC(td->offset, &td->release);
 
/* set the absolute deadline to the activation time + offset + rdeadline */
lev->adeadline[p] = lev->release[p];
ADDUSEC2TIMESPEC(lev->rdeadline[p], &lev->adeadline[p]);
td->adeadline = td->release;
ADDUSEC2TIMESPEC(td->rdeadline, &td->adeadline);
 
/* Check if release > clocktime. If so, release it later,
/* Check if release > clocktime. If yes, release it later,
otherwise release it now. */
 
proc_table[p].status = EDF_IDLE;
 
if (TIMESPEC_A_GT_B(&lev->release[p], &clocktime)) {
/* release later */
kern_event_post(&lev->release[p],EDF_timer_offset,(void *)p);
if (TIMESPEC_A_GT_B(&td->release, &clocktime)) {
/* release later, post an offset timer */
kern_event_post(&td->release,EDF_timer_offset,(void *)p);
} else {
/* release now */
EDF_intern_release(p, lev);
405,6 → 414,7
}
}
 
/* Reinserts a task that has been blocked into the ready queue */
static void EDF_public_unblock(LEVEL l, PID p)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
414,6 → 424,7
iq_timespec_insert(p,&lev->ready);
}
 
/* Called when a task experiences a synchronization block */
static void EDF_public_block(LEVEL l, PID p)
{
/* Extract the running task from the level
427,28 → 438,30
*/
}
 
/* Called by task_endcycle or task_sleep: Ends the current instance */
static int EDF_public_message(LEVEL l, PID p, void *m)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
EDF_task_des *td = &lev->tvec[p];
 
switch((long)(m)) {
/* task_endcycle() */
case 0:
/* if there are no pending jobs */
if (lev->nact[p] == 0) {
if (td->nact == 0) {
/* remove deadline timer, if any */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
if (td->dl_timer != -1) {
kern_event_delete(td->dl_timer);
td->dl_timer = -1;
}
if (lev->taskflags[p] & EDF_FLAG_SPORADIC) {
if (td->flags & EDF_FLAG_SPORADIC) {
/* sporadic task */
if (!(lev->taskflags[p] & EDF_FLAG_SPOR_LATE)) {
if (!(td->flags & EDF_FLAG_SPOR_LATE)) {
proc_table[p].status = EDF_WAIT;
} else {
/* it's late, move it directly to SLEEP */
proc_table[p].status = SLEEP;
lev->taskflags[p] &= ~EDF_FLAG_SPOR_LATE;
td->flags &= ~EDF_FLAG_SPOR_LATE;
}
} else {
/* periodic task */
456,15 → 469,15
}
} else {
/* we are late / there are pending jobs */
lev->nact[p]--;
td->nact--;
/* compute and assign absolute deadline */
*iq_query_timespec(p,&lev->ready) = lev->adeadline[p];
*iq_query_timespec(p,&lev->ready) = td->adeadline;
iq_timespec_insert(p,&lev->ready);
/* increase assigned deadline */
ADDUSEC2TIMESPEC(lev->period[p], &lev->adeadline[p]);
ADDUSEC2TIMESPEC(td->period, &td->adeadline);
#ifdef EDF_DEBUG
edf_printf("(Late) At %s: releasing %s with deadline %s\n",
pnow(),proc_table[p].name,ptime1(&lev->adeadline[p]));
pnow(),proc_table[p].name,ptime1(&td->adeadline));
#endif
}
break;
472,32 → 485,32
/* task_sleep() */
case 1:
/* remove deadline timer, if any */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
if (td->dl_timer != -1) {
kern_event_delete(td->dl_timer);
td->dl_timer = -1;
}
if (lev->taskflags[p] & EDF_FLAG_SPORADIC) {
if (td->flags & EDF_FLAG_SPORADIC) {
/* sporadic task */
if (!(lev->taskflags[p] & EDF_FLAG_SPOR_LATE)) {
if (!(td->flags & EDF_FLAG_SPOR_LATE)) {
proc_table[p].status = EDF_WAIT;
} else {
/* it's late, move it directly to SLEEP */
proc_table[p].status = SLEEP;
lev->taskflags[p] &= ~EDF_FLAG_SPOR_LATE;
td->flags &= ~EDF_FLAG_SPOR_LATE;
}
} else {
/* periodic task */
if (!(lev->nact[p] > 0)) {
if (!(td->nact > 0)) {
/* we are on time. go to the EDF_WAIT state */
proc_table[p].status = EDF_WAIT;
} else {
/* we are late. delete pending activations and go to SLEEP */
lev->nact[p] = 0;
td->nact = 0;
proc_table[p].status = SLEEP;
/* remove end of period timer */
if (lev->eop_timer[p] != -1) {
kern_event_delete(lev->eop_timer[p]);
lev->eop_timer[p] = -1;
if (td->eop_timer != -1) {
kern_event_delete(td->eop_timer);
td->eop_timer = -1;
}
}
}
509,21 → 522,24
}
jet_update_endcycle(); /* Update the Jet data... */
proc_table[p].avail_time = proc_table[p].wcet;
TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);
TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,
(unsigned short int)proc_table[p].context,(unsigned int)l);
return 0;
 
}
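
As a usage note, the task_endcycle() branch handled above (case 0) is normally driven by a periodic task body shaped like the sketch below; the body name is hypothetical, while TASK and task_endcycle() are the usual S.Ha.R.K. primitives.

/* Sketch (not part of this diff): periodic body ending each instance
   with task_endcycle(), which delivers message 0 to EDF_public_message(). */
TASK hard_task_body(void *arg)
{
  while (1) {
    /* ... work of one instance ... */
    task_endcycle();  /* task waits in EDF_WAIT/EDF_IDLE until the next release */
  }
  return NULL;
}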
 
/* End the task and free the resources at the end of the period */
static void EDF_public_end(LEVEL l, PID p)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
EDF_task_des *td = &lev->tvec[p];
 
if (!(lev->taskflags[p] & EDF_FLAG_SPOR_LATE)) {
if (!(td->flags & EDF_FLAG_SPOR_LATE)) {
/* remove the deadline timer (if any) */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
if (td->dl_timer != -1) {
kern_event_delete(td->dl_timer);
td->dl_timer = -1;
}
proc_table[p].status = EDF_ZOMBIE;
} else {
531,13 → 547,17
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->rdeadline[p]) * proc_table[p].wcet;
lev->U -= (MAX_BANDWIDTH/td->rdeadline) * proc_table[p].wcet;
}
}
 
/**** Private generic kernel interface functions (guest calls) ****/
 
/* Insert a guest task */
static void EDF_private_insert(LEVEL l, PID p, TASK_MODEL *m)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
EDF_task_des *td = &lev->tvec[p];
JOB_TASK_MODEL *job;
 
if (m->pclass != JOB_PCLASS || (m->level != 0 && m->level != l) ) {
552,16 → 572,17
iq_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_READY;
lev->dl_timer[p] = -1;
td->dl_timer = -1;
 
lev->period[p] = job->period;
td->period = job->period;
 
if (!job->noraiseexc) {
lev->dl_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
td->dl_timer = kern_event_post(iq_query_timespec(p, &lev->ready),
EDF_timer_guest_deadline,(void *)p);
}
}
 
/* Dispatch a guest task */
static void EDF_private_dispatch(LEVEL l, PID p, int nostop)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
572,6 → 593,7
iq_extract(p, &lev->ready);
}
 
/* Called when a guest task is preempted/out of budget */
static void EDF_private_epilogue(LEVEL l, PID p)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
581,29 → 603,30
proc_table[p].status = EDF_READY;
}
 
/* Extract a guest task */
static void EDF_private_extract(LEVEL l, PID p)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
EDF_task_des *td = &lev->tvec[p];
 
if (proc_table[p].status == EDF_READY)
iq_extract(p, &lev->ready);
 
/* we remove the deadline timer, because the slice is finished */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
if (td->dl_timer != -1) {
kern_event_delete(td->dl_timer);
td->dl_timer = -1;
}
 
}
 
 
/* Registration function:
int flags the init flags ... see edf.h */
/**** Level registration function ****/
 
LEVEL EDF_register_level(int flags)
{
LEVEL l; /* the level that we register */
EDF_level_des *lev; /* for readableness only */
PID i; /* a counter */
 
printk("EDF_register_level\n");
 
634,25 → 657,25
lev->l.public_block = EDF_public_block;
lev->l.public_message = EDF_public_message;
 
/* fill the EDF descriptor part */
for(i=0; i<MAX_PROC; i++) {
lev->period[i] = 0;
lev->dl_timer[i] = -1;
lev->eop_timer[i] = -1;
lev->taskflags[i] = 0;
lev->dl_miss[i] = 0;
lev->wcet_miss[i] = 0;
lev->nact[i] = 0;
lev->nskip[i] = 0;
}
iq_init(&lev->ready, &freedesc, 0);
 
iq_init(&lev->ready, &freedesc, 0);
lev->flags = flags;
lev->U = 0;
if (lev->flags & EDF_ENABLE_WCET_EXCEPTION) {
lev->flags |= EDF_ENABLE_WCET_CHECK;
}
if (lev->flags & EDF_ENABLE_DL_EXCEPTION) {
lev->flags |= EDF_ENABLE_DL_CHECK;
}
 
lev->U = 0;
 
return l;
}
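
A minimal registration sketch is shown below; the __kernel_register_levels__ hook and the TICK constant follow the convention of the standard S.Ha.R.K. initfiles (assumed here), and the flag combination is only an example built from the flags tested in this module.

/* Sketch (not part of this diff): registering the EDF level from an initfile. */
TIME __kernel_register_levels__(void *arg)
{
  EDF_register_level(EDF_ENABLE_GUARANTEE |
                     EDF_ENABLE_WCET_CHECK |
                     EDF_ENABLE_DL_CHECK);
  /* ... dummy level, aperiodic servers, resource modules ... */
  return TICK;  /* system tick, assumed defined by the initfile */
}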
 
 
/**** Public utility functions ****/
 
/* Get the bandwidth used by the level */
bandwidth_t EDF_usedbandwidth(LEVEL l)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
660,35 → 683,44
return lev->U;
}
 
int EDF_get_nact(PID p)
/* Get the number of missed deadlines for a task */
int EDF_get_dl_miss(PID p)
{
LEVEL l = proc_table[p].task_level;
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
return lev->nact[p];
EDF_task_des *td = &lev->tvec[p];
 
return td->dl_miss;
}
 
int EDF_get_dl_miss(PID p)
/* Get the number of execution overruns for a task */
int EDF_get_wcet_miss(PID p)
{
LEVEL l = proc_table[p].task_level;
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
EDF_task_des *td = &lev->tvec[p];
 
return lev->dl_miss[p];
return td->wcet_miss;
}
 
int EDF_get_wcet_miss(PID p)
/* Get the number of skipped activations for a task */
int EDF_get_act_miss(PID p)
{
LEVEL l = proc_table[p].task_level;
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
return lev->wcet_miss[p];
EDF_task_des *td = &lev->tvec[p];
return td->act_miss;
}
 
int EDF_get_nskip(PID p)
/* Get the current number of queued activations for a task */
int EDF_get_nact(PID p)
{
LEVEL l = proc_table[p].task_level;
 
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
EDF_task_des *td = &lev->tvec[p];
return lev->nskip[p];
return td->nact;
}
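
A small usage sketch of these accessors follows; the dump function is hypothetical and simply prints the per-task counters with kern_printf, which this file already uses for debugging.

/* Sketch (not part of this diff): dumping the counters exported above. */
void edf_dump_task_stats(PID p)
{
  kern_printf("task %d: dl_miss=%d wcet_miss=%d act_miss=%d nact=%d\n",
              p,
              EDF_get_dl_miss(p),
              EDF_get_wcet_miss(p),
              EDF_get_act_miss(p),
              EDF_get_nact(p));
}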