Subversion Repositories shark

Compare Revisions

Ignore whitespace Rev 760 → Rev 761

/shark/trunk/kernel/modules/rm.c
5,11 → 5,11
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Authors:
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
* Anton Cervin
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
20,14 → 20,14
 
/**
------------
CVS : $Id: rm.c,v 1.9 2004-05-17 15:03:52 anton Exp $
CVS : $Id: rm.c,v 1.10 2004-06-21 11:43:02 anton Exp $
 
File: $File$
Revision: $Revision: 1.9 $
Last update: $Date: 2004-05-17 15:03:52 $
Revision: $Revision: 1.10 $
Last update: $Date: 2004-06-21 11:43:02 $
------------
 
This file contains the scheduling module RM (rate/deadline monotonic)
This file contains the scheduling module RM (rate-/deadline-monotonic)
 
Read rm.h for further details.
 
64,9 → 64,7
 
//#define RM_DEBUG
#define rm_printf kern_printf
 
#ifdef RM_DEBUG
/* some debug print functions */
char *pnow() {
static char buf[40];
struct timespec t;
86,71 → 84,132
}
#endif
 
/* statuses used in the level */
#define RM_READY MODULE_STATUS_BASE /* ready */
#define RM_IDLE MODULE_STATUS_BASE+1 /* idle, waiting for offset/eop */
#define RM_WAIT MODULE_STATUS_BASE+2 /* to sleep, waiting for eop */
#define RM_ZOMBIE MODULE_STATUS_BASE+3 /* zombie, waiting for eop */
/* Statuses used in the level.
   Replacement lists are parenthesized so the macros expand safely
   inside larger expressions (e.g. comparisons or arithmetic). */
#define RM_READY  (MODULE_STATUS_BASE)   /* ready */
#define RM_IDLE   (MODULE_STATUS_BASE+1) /* idle, waiting for offset/eop */
#define RM_WAIT   (MODULE_STATUS_BASE+2) /* to sleep, waiting for eop */
#define RM_ZOMBIE (MODULE_STATUS_BASE+3) /* to free, waiting for eop */
 
/* task flags */
/* Task flags */
#define RM_FLAG_SPORADIC 1 /* the task is sporadic */
#define RM_FLAG_SPOR_LATE 2 /* sporadic task with period overrun */
 
 
/* the level redefinition for the Earliest Deadline First level */
/* Task descriptor */
typedef struct {
level_des l; /* standard level descriptor */
IQUEUE ready; /* the ready queue */
int flags; /* level flags */
bandwidth_t U; /* used bandwidth */
int flags; /* task flags */
TIME period; /* period (or inter-arrival interval) */
TIME rdeadline; /* relative deadline */
TIME offset; /* release offset */
struct timespec release; /* release time of current instance */
struct timespec adeadline; /* latest assigned deadline */
int dl_timer; /* deadline timer */
int eop_timer; /* end of period timer */
int dl_miss; /* deadline miss counter */
int wcet_miss; /* WCET miss counter */
int act_miss; /* activation miss counter */
int nact; /* number of pending periodic jobs */
} RM_task_des;
 
int taskflags[MAX_PROC]; /* task flags */
TIME period[MAX_PROC]; /* task period */
TIME rdeadline[MAX_PROC]; /* task relative deadlines */
TIME offset[MAX_PROC]; /* task release offsets */
struct timespec release[MAX_PROC]; /* release time of the task */
int dl_timer[MAX_PROC]; /* deadline overrun timer */
int eop_timer[MAX_PROC]; /* end of period timer */
int dl_miss[MAX_PROC]; /* deadline miss counter */
int wcet_miss[MAX_PROC]; /* WCET miss counter */
int nact[MAX_PROC]; /* number of pending periodic jobs */
int nskip[MAX_PROC]; /* number of skipped sporadic jobs */
 
/* Level descriptor: per-level state of the RM scheduling module */
typedef struct {
level_des l; /* standard level descriptor; kept first so level_table[] entries can be cast to RM_level_des* */
int flags; /* level flags (RM_ENABLE_* options given at registration) */
IQUEUE ready; /* the ready queue, kept priority-ordered via iq_priority_insert */
bandwidth_t U; /* used bandwidth (sum of guaranteed task bandwidths) */
RM_task_des tvec[MAX_PROC]; /* vector of task descriptors, indexed by PID */
} RM_level_des;
 
 
static void RM_timer_endperiod(void *par);
/* Module function cross-references */
static void RM_intern_release(PID p, RM_level_des *lev);
 
 
/* This function is called when a task misses its deadline */
/**** Timer event handler functions ****/
 
static void RM_timer_deadline(void *par)
/* This timer event handler is called at the end of the period */
static void RM_timer_endperiod(void *par)
{
PID p = (PID) par;
RM_level_des *lev;
lev = (RM_level_des *)level_table[proc_table[p].task_level];
RM_level_des *lev = (RM_level_des *)level_table[proc_table[p].task_level];
RM_task_des *td = &lev->tvec[p];
TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0);
td->eop_timer = -1;
 
if (proc_table[p].status == RM_ZOMBIE) {
/* put the task in the FREE state */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/td->rdeadline) * proc_table[p].wcet;
return;
}
 
if (proc_table[p].status == RM_WAIT) {
proc_table[p].status = SLEEP;
return;
}
if (td->flags & RM_FLAG_SPORADIC) {
/* the task is sporadic and still busy, mark it as late */
td->flags |= RM_FLAG_SPOR_LATE;
} else {
/* the task is periodic, release/queue another instance */
RM_intern_release(p, lev);
}
}
 
/* This timer event handler is called when a task misses its deadline */
static void RM_timer_deadline(void *par)
{
PID p = (PID) par;
RM_level_des *lev = (RM_level_des *)level_table[proc_table[p].task_level];
RM_task_des *td = &lev->tvec[p];
 
TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,
(unsigned short int)proc_table[p].context,0);
 
if (lev->flags & RM_ENABLE_DL_EXCEPTION) {
kern_raise(XDEADLINE_MISS,p);
} else {
lev->dl_miss[p]++;
td->dl_miss++;
}
}
 
/* Offset timer handler: fires once the task's release offset has
   elapsed, at which point the instance is actually released */
static void RM_timer_offset(void *par)
{
  PID p = (PID) par;
  RM_level_des *lev =
    (RM_level_des *)level_table[proc_table[p].task_level];

  /* the offset has expired: perform the release now */
  RM_intern_release(p, lev);
}
 
/* Release (or queue) task, post deadline and endperiod timers.
The release time is stored in lev->release[p]. */
/* Deadline timer handler for guest tasks: a guest deadline miss is
   always treated as fatal, so trace the event and raise the
   deadline-miss exception on the offending task */
static void RM_timer_guest_deadline(void *par)
{
  PID p = (PID) par;

  TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,
                  (unsigned short int)proc_table[p].context, 0);

  kern_raise(XDEADLINE_MISS, p);
}
 
 
/**** Internal utility functions ****/
 
/* Release (or queue) a task, post deadline and endperiod timers */
static void RM_intern_release(PID p, RM_level_des *lev)
{
struct timespec temp;
RM_task_des *td = &lev->tvec[p];
 
/* post deadline timer */
if (lev->flags & RM_ENABLE_DL_CHECK) {
temp = lev->release[p];
ADDUSEC2TIMESPEC(lev->rdeadline[p], &temp);
lev->dl_timer[p] = kern_event_post(&temp,RM_timer_deadline,(void *)p);
temp = td->release;
ADDUSEC2TIMESPEC(td->rdeadline, &temp);
td->dl_timer = kern_event_post(&temp,RM_timer_deadline,(void *)p);
}
 
/* release or queue next job */
157,81 → 216,35
if (proc_table[p].status == RM_IDLE) {
/* assign deadline, insert task in the ready queue */
proc_table[p].status = RM_READY;
*iq_query_priority(p,&lev->ready) = lev->rdeadline[p];
*iq_query_priority(p,&lev->ready) = td->rdeadline;
iq_priority_insert(p,&lev->ready);
#ifdef RM_DEBUG
rm_printf("At %s: releasing %s\n", pnow(), proc_table[p].name);
rm_printf("At %s: releasing %s with deadline %s\n", pnow(),
proc_table[p].name, ptime1(&td->adeadline));
#endif
/* increase assigned deadline */
ADDUSEC2TIMESPEC(td->period, &td->adeadline);
/* reschedule */
event_need_reschedule();
} else {
/* queue */
lev->nact[p]++;
td->nact++;
}
 
/* increase release time */
ADDUSEC2TIMESPEC(lev->period[p],&lev->release[p]);
ADDUSEC2TIMESPEC(td->period, &td->release);
/* post end of period timer */
kern_event_post(&lev->release[p],RM_timer_endperiod,(void *)p);
td->eop_timer = kern_event_post(&td->release, RM_timer_endperiod,(void *)p);
 
TRACER_LOGEVENT(FTrace_EVT_task_timer,(unsigned short int)proc_table[p].context,(unsigned int)proc_table[p].task_level);
TRACER_LOGEVENT(FTrace_EVT_task_timer,
(unsigned short int)proc_table[p].context,
(unsigned int)proc_table[p].task_level);
}
 
 
/* First release */
/**** Public generic kernel interface functions ****/
 
static void RM_timer_offset(void *par)
{
PID p = (PID) par;
RM_level_des *lev;
lev = (RM_level_des *)level_table[proc_table[p].task_level];
 
RM_intern_release(p, lev);
}
 
 
/* This function is called at the end of the period */
 
static void RM_timer_endperiod(void *par)
{
PID p = (PID) par;
RM_level_des *lev;
lev = (RM_level_des *)level_table[proc_table[p].task_level];
 
if (proc_table[p].status == RM_ZOMBIE) {
/* put the task in the FREE state */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->rdeadline[p]) * proc_table[p].wcet;
return;
}
if (!(lev->taskflags[p] & RM_FLAG_SPORADIC)) {
/* if the task is periodic, rerelease it (now or later) */
RM_intern_release(p, lev);
} else {
/* else check if the task is waiting for end of period */
if (proc_table[p].status == RM_WAIT) {
proc_table[p].status = SLEEP;
} else {
/* the task is still busy. mark it as late */
lev->taskflags[p] |= RM_FLAG_SPOR_LATE;
}
}
}
 
 
/* This function is called when a guest task misses its deadline */
 
static void RM_timer_guest_deadline(void *par)
{
PID p = (PID) par;
TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0);
kern_raise(XDEADLINE_MISS,p);
}
 
/* The scheduler only gets the first task in the queue */
/* Returns the first task in the ready queue */
static PID RM_public_scheduler(LEVEL l)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
238,7 → 251,7
return iq_query_first(&lev->ready);
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
/* Checks and decreases the available system bandwidth */
static int RM_public_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
251,9 → 264,11
return 0;
}
 
/* Called by task_create: Checks task model and creates a task */
static int RM_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
RM_task_des *td = &lev->tvec[p];
HARD_TASK_MODEL *h;
 
if (m->pclass != HARD_PCLASS) return -1;
263,15 → 278,15
if (h->drel > h->mit) return -1; /* only D <= T supported */
 
if (!h->drel) {
lev->rdeadline[p] = h->mit;
td->rdeadline = h->mit;
} else {
lev->rdeadline[p] = h->drel;
td->rdeadline = h->drel;
}
 
/* check the free bandwidth... */
if (lev->flags & RM_ENABLE_GUARANTEE) {
bandwidth_t b;
b = (MAX_BANDWIDTH / lev->rdeadline[p]) * h->wcet;
b = (MAX_BANDWIDTH / td->rdeadline) * h->wcet;
 
/* really update lev->U, checking an overflow... */
if (MAX_BANDWIDTH - lev->U > b) {
281,27 → 296,23
}
}
 
if (lev->flags & RM_ENABLE_WCET_EXCEPTION) {
lev->flags |= RM_ENABLE_WCET_CHECK;
td->flags = 0;
if (h->periodicity == APERIODIC) {
td->flags |= RM_FLAG_SPORADIC;
}
if (lev->flags & RM_ENABLE_DL_EXCEPTION) {
lev->flags |= RM_ENABLE_DL_CHECK;
td->period = h->mit;
if (td->rdeadline == td->period) {
/* Ensure that D <= T-eps to make dl_timer trigger before eop_timer */
td->rdeadline = td->period - 1;
}
td->offset = h->offset;
td->dl_timer = -1;
td->eop_timer = -1;
td->dl_miss = 0;
td->wcet_miss = 0;
td->act_miss = 0;
td->nact = 0;
 
lev->period[p] = h->mit;
if (lev->rdeadline[p] == lev->period[p]) {
/* Ensure that D <= T-eps to make dl_timer trigger before rel_timer */
lev->rdeadline[p] = lev->period[p] - 1;
}
lev->taskflags[p] = 0;
 
if (h->periodicity == APERIODIC)
lev->taskflags[p] |= RM_FLAG_SPORADIC;
lev->dl_timer[p] = -1;
lev->eop_timer[p] = -1;
 
/* Enable wcet check */
if (lev->flags & RM_ENABLE_WCET_CHECK) {
proc_table[p].avail_time = h->wcet;
309,22 → 320,21
proc_table[p].control |= CONTROL_CAP; /* turn on measurement */
}
 
lev->offset[p] = h->offset;
 
NULL_TIMESPEC(&lev->release[p]);
 
return 0; /* OK, also if the task cannot be guaranteed... */
}
 
/* Reclaim the bandwidth used by the task */
static void RM_public_detach(LEVEL l, PID p)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
RM_task_des *td = &lev->tvec[p];
 
if (lev->flags & RM_ENABLE_GUARANTEE) {
lev->U -= (MAX_BANDWIDTH / lev->rdeadline[p]) * proc_table[p].wcet;
lev->U -= (MAX_BANDWIDTH / td->rdeadline) * proc_table[p].wcet;
}
}
 
/* Extracts the running task from the ready queue */
static void RM_public_dispatch(LEVEL l, PID p, int nostop)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
331,19 → 341,22
iq_extract(p, &lev->ready);
}
 
/* Called when the task is preempted or when its budget is exhausted */
static void RM_public_epilogue(LEVEL l, PID p)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
RM_task_des *td = &lev->tvec[p];
 
/* check if the wcet is finished... */
if (lev->flags & RM_ENABLE_WCET_CHECK) {
if (proc_table[p].avail_time <= 0) {
TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,(unsigned short int)proc_table[p].context,0);
TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,
(unsigned short int)proc_table[p].context,0);
if (lev->flags & RM_ENABLE_WCET_EXCEPTION) {
kern_raise(XWCET_VIOLATION,p);
} else {
proc_table[p].control &= ~CONTROL_CAP;
lev->wcet_miss[p]++;
td->wcet_miss++;
}
}
}
354,10 → 367,12
 
}
 
/* Called by task_activate or group_activate: Activates the task at time t */
static void RM_public_activate(LEVEL l, PID p, struct timespec *t)
{
struct timespec clocktime;
RM_level_des *lev = (RM_level_des *)(level_table[l]);
RM_task_des *td = &lev->tvec[p];
 
kern_gettime(&clocktime);
 
369,25 → 384,30
} else {
/* skip the sporadic job, but increase a counter */
#ifdef RM_DEBUG
rm_printf("At %s: activation of %s skipped\n", pnow(), proc_table[p].name);
rm_printf("At %s: activation of %s skipped\n", pnow(),
proc_table[p].name);
#endif
lev->nskip[p]++;
td->act_miss++;
}
return;
}
/* set the release time to the activation time + offset */
lev->release[p] = *t;
ADDUSEC2TIMESPEC(lev->offset[p], &lev->release[p]);
td->release = *t;
ADDUSEC2TIMESPEC(td->offset, &td->release);
 
/* Check if release > clocktime. If so, release it later,
/* set the absolute deadline to the activation time + offset + rdeadline */
td->adeadline = td->release;
ADDUSEC2TIMESPEC(td->rdeadline, &td->adeadline);
 
/* Check if release > clocktime. If yes, release it later,
otherwise release it now. */
 
proc_table[p].status = RM_IDLE;
 
if (TIMESPEC_A_GT_B(&lev->release[p], &clocktime)) {
/* release later */
kern_event_post(&lev->release[p],RM_timer_offset,(void *)p);
if (TIMESPEC_A_GT_B(&td->release, &clocktime)) {
/* release later, post an offset timer */
kern_event_post(&td->release,RM_timer_offset,(void *)p);
} else {
/* release now */
RM_intern_release(p, lev);
394,6 → 414,7
}
}
 
/* Reinserts a task that has been blocked into the ready queue */
static void RM_public_unblock(LEVEL l, PID p)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
403,6 → 424,7
iq_priority_insert(p,&lev->ready);
}
 
/* Called when a task experiences a synchronization block */
static void RM_public_block(LEVEL l, PID p)
{
/* Extract the running task from the level
416,28 → 438,30
*/
}
 
/* Called by task_endcycle or task_sleep: Ends the current instance */
static int RM_public_message(LEVEL l, PID p, void *m)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
RM_task_des *td = &lev->tvec[p];
 
switch((long)(m)) {
/* task_endcycle() */
case 0:
/* if there are no pending jobs */
if (lev->nact[p] == 0) {
if (td->nact == 0) {
/* remove deadline timer, if any */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
if (td->dl_timer != -1) {
kern_event_delete(td->dl_timer);
td->dl_timer = -1;
}
if (lev->taskflags[p] & RM_FLAG_SPORADIC) {
if (td->flags & RM_FLAG_SPORADIC) {
/* sporadic task */
if (!(lev->taskflags[p] & RM_FLAG_SPOR_LATE)) {
if (!(td->flags & RM_FLAG_SPOR_LATE)) {
proc_table[p].status = RM_WAIT;
} else {
/* it's late, move it directly to SLEEP */
proc_table[p].status = SLEEP;
lev->taskflags[p] &= ~RM_FLAG_SPOR_LATE;
td->flags &= ~RM_FLAG_SPOR_LATE;
}
} else {
/* periodic task */
445,12 → 469,15
}
} else {
/* we are late / there are pending jobs */
lev->nact[p]--;
*iq_query_priority(p,&lev->ready) = lev->rdeadline[p];
td->nact--;
/* compute and assign absolute deadline */
*iq_query_priority(p,&lev->ready) = td->rdeadline;
iq_priority_insert(p,&lev->ready);
/* increase assigned deadline */
ADDUSEC2TIMESPEC(td->period, &td->adeadline);
#ifdef RM_DEBUG
rm_printf("(Late) At %s: releasing %s\n",
pnow(), proc_table[p].name);
rm_printf("(Late) At %s: releasing %s with deadline %s\n",
pnow(),proc_table[p].name,ptime1(&td->adeadline));
#endif
}
break;
458,32 → 485,32
/* task_sleep() */
case 1:
/* remove deadline timer, if any */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
if (td->dl_timer != -1) {
kern_event_delete(td->dl_timer);
td->dl_timer = -1;
}
if (lev->taskflags[p] & RM_FLAG_SPORADIC) {
if (td->flags & RM_FLAG_SPORADIC) {
/* sporadic task */
if (!(lev->taskflags[p] & RM_FLAG_SPOR_LATE)) {
if (!(td->flags & RM_FLAG_SPOR_LATE)) {
proc_table[p].status = RM_WAIT;
} else {
/* it's late, move it directly to SLEEP */
proc_table[p].status = SLEEP;
lev->taskflags[p] &= ~RM_FLAG_SPOR_LATE;
td->flags &= ~RM_FLAG_SPOR_LATE;
}
} else {
/* periodic task */
if (!(lev->nact[p] > 0)) {
if (!(td->nact > 0)) {
/* we are on time. go to the RM_WAIT state */
proc_table[p].status = RM_WAIT;
} else {
/* we are late. delete pending activations and go to SLEEP */
lev->nact[p] = 0;
td->nact = 0;
proc_table[p].status = SLEEP;
/* remove end of period timer */
if (lev->eop_timer[p] != -1) {
kern_event_delete(lev->eop_timer[p]);
lev->eop_timer[p] = -1;
if (td->eop_timer != -1) {
kern_event_delete(td->eop_timer);
td->eop_timer = -1;
}
}
}
493,23 → 520,26
if (lev->flags & RM_ENABLE_WCET_CHECK) {
proc_table[p].control |= CONTROL_CAP;
}
jet_update_endcycle(); /* Update the Jet data... */
proc_table[p].avail_time = proc_table[p].wcet;
jet_update_endcycle(); /* Update the Jet data... */
TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);
 
TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,
(unsigned short int)proc_table[p].context,(unsigned int)l);
return 0;
 
}
 
/* End the task and free the resources at the end of the period */
static void RM_public_end(LEVEL l, PID p)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
RM_task_des *td = &lev->tvec[p];
 
if (!(lev->taskflags[p] & RM_FLAG_SPOR_LATE)) {
if (!(td->flags & RM_FLAG_SPOR_LATE)) {
/* remove the deadline timer (if any) */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
if (td->dl_timer != -1) {
kern_event_delete(td->dl_timer);
td->dl_timer = -1;
}
proc_table[p].status = RM_ZOMBIE;
} else {
517,13 → 547,17
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->rdeadline[p]) * proc_table[p].wcet;
lev->U -= (MAX_BANDWIDTH/td->rdeadline) * proc_table[p].wcet;
}
}
 
/**** Private generic kernel interface functions (guest calls) ****/
 
/* Insert a guest task */
static void RM_private_insert(LEVEL l, PID p, TASK_MODEL *m)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
RM_task_des *td = &lev->tvec[p];
JOB_TASK_MODEL *job;
 
if (m->pclass != JOB_PCLASS || (m->level != 0 && m->level != l) ) {
535,22 → 569,22
 
/* Insert task in the correct position */
*iq_query_timespec(p, &lev->ready) = job->deadline;
/* THIS IS QUESTIONABLE!! rel deadline? */
*iq_query_priority(p, &lev->ready) = lev->period[p] = job->period;
 
*iq_query_priority(p, &lev->ready) = job->period;
/* THIS IS QUESTIONABLE!! relative deadline? */
iq_priority_insert(p,&lev->ready);
proc_table[p].status = RM_READY;
lev->dl_timer[p] = -1;
td->dl_timer = -1;
 
lev->period[p] = job->period;
td->period = job->period;
 
if (!job->noraiseexc) {
lev->dl_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
td->dl_timer = kern_event_post(iq_query_timespec(p, &lev->ready),
RM_timer_guest_deadline,(void *)p);
}
}
 
/* Dispatch a guest task */
static void RM_private_dispatch(LEVEL l, PID p, int nostop)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
561,6 → 595,7
iq_extract(p, &lev->ready);
}
 
/* Called when a guest task is preempted/out of budget */
static void RM_private_epilogue(LEVEL l, PID p)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
570,30 → 605,30
proc_table[p].status = RM_READY;
}
 
/* Extract a guest task */
static void RM_private_extract(LEVEL l, PID p)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
RM_task_des *td = &lev->tvec[p];
 
if (proc_table[p].status == RM_READY)
iq_extract(p, &lev->ready);
 
/* we remove the deadline timer, because the slice is finished */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
if (td->dl_timer != -1) {
kern_event_delete(td->dl_timer);
td->dl_timer = -1;
}
 
}
 
 
/**** Level registration function ****/
 
/* Registration function:
int flags the init flags ... see rm.h */
LEVEL RM_register_level(int flags)
{
LEVEL l; /* the level that we register */
RM_level_des *lev; /* for readableness only */
PID i; /* a counter */
 
printk("RM_register_level\n");
 
624,24 → 659,25
lev->l.public_block = RM_public_block;
lev->l.public_message = RM_public_message;
 
/* fill the RM descriptor part */
for(i=0; i<MAX_PROC; i++) {
lev->period[i] = 0;
lev->dl_timer[i] = -1;
lev->taskflags[i] = 0;
lev->dl_miss[i] = 0;
lev->wcet_miss[i] = 0;
lev->nact[i] = 0;
lev->nskip[i] = 0;
}
iq_init(&lev->ready, &freedesc, 0);
 
iq_init(&lev->ready, &freedesc, 0);
lev->flags = flags;
lev->U = 0;
if (lev->flags & RM_ENABLE_WCET_EXCEPTION) {
lev->flags |= RM_ENABLE_WCET_CHECK;
}
if (lev->flags & RM_ENABLE_DL_EXCEPTION) {
lev->flags |= RM_ENABLE_DL_CHECK;
}
 
lev->U = 0;
 
return l;
}
 
 
/**** Public utility functions ****/
 
/* Get the bandwidth used by the level */
bandwidth_t RM_usedbandwidth(LEVEL l)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
649,35 → 685,44
return lev->U;
}
 
int RM_get_nact(PID p)
/* Get the number of missed deadlines for a task */
int RM_get_dl_miss(PID p)
{
LEVEL l = proc_table[p].task_level;
RM_level_des *lev = (RM_level_des *)(level_table[l]);
return lev->nact[p];
RM_task_des *td = &lev->tvec[p];
 
return td->dl_miss;
}
 
int RM_get_dl_miss(PID p)
/* Get the number of execution overruns for a task */
int RM_get_wcet_miss(PID p)
{
LEVEL l = proc_table[p].task_level;
RM_level_des *lev = (RM_level_des *)(level_table[l]);
RM_task_des *td = &lev->tvec[p];
 
return lev->dl_miss[p];
return td->wcet_miss;
}
 
int RM_get_wcet_miss(PID p)
/* Get the number of skipped activations for a task */
int RM_get_act_miss(PID p)
{
LEVEL l = proc_table[p].task_level;
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
return lev->wcet_miss[p];
RM_task_des *td = &lev->tvec[p];
return td->act_miss;
}
 
int RM_get_nskip(PID p)
/* Get the current number of queued activations for a task */
int RM_get_nact(PID p)
{
LEVEL l = proc_table[p].task_level;
 
RM_level_des *lev = (RM_level_des *)(level_table[l]);
RM_task_des *td = &lev->tvec[p];
return lev->nskip[p];
return td->nact;
}