/* (CVS merge hunk marker: 20,24 → 20,17) */
|
/** |
------------ |
CVS : $Id: rm.c,v 1.8 2004-03-10 14:51:44 giacomo Exp $ |
CVS : $Id: rm.c,v 1.9 2004-05-17 15:03:52 anton Exp $ |
|
File: $File$ |
Revision: $Revision: 1.8 $ |
Last update: $Date: 2004-03-10 14:51:44 $ |
Revision: $Revision: 1.9 $ |
Last update: $Date: 2004-05-17 15:03:52 $ |
------------ |
|
 This file contains the scheduling module RM (rate/deadline monotonic)
|
Read rm.h for further details. |
|
This file is equal to EDF.c except for: |
|
. EDF changed to RM :-) |
. q_timespec_insert changed to q_insert |
. proc_table[p].priority is also modified when we modify lev->period[p] |
|
|
**/ |
|
/* (CVS merge hunk marker: 67,91 → 60,174) */
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
|
#include <tracer.h> |
|
/*+ Status used in the level +*/
/* NOTE(review): CVS merge residue — these statuses/flags come from the old
   (1.8) module.  RM_READY, RM_WAIT, RM_IDLE, RM_ZOMBIE and RM_FLAG_SPORADIC
   are #defined AGAIN further down with DIFFERENT values for RM_WAIT/RM_IDLE/
   RM_ZOMBIE; the duplicates must be reconciled.  RM_WCET_VIOLATED and
   RM_FLAG_NORAISEEXC are only referenced by old-revision code paths. */
#define RM_READY MODULE_STATUS_BASE /*+ - Ready status +*/
#define RM_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/
#define RM_WAIT MODULE_STATUS_BASE+3 /*+ to wait the deadline +*/
#define RM_IDLE MODULE_STATUS_BASE+4 /*+ to wait the deadline +*/
#define RM_ZOMBIE MODULE_STATUS_BASE+5 /*+ to wait the free time +*/
/* define RM_DEBUG to enable the rm_printf() debug traces below */
//#define RM_DEBUG
#define rm_printf kern_printf

/*+ flags +*/
#define RM_FLAG_SPORADIC 1
#define RM_FLAG_NORAISEEXC 2
#ifdef RM_DEBUG |
/* some debug print functions */ |
/* Current system time formatted as "sec.microsec".
   Debug helper only: returns a static buffer, so it is not reentrant. */
char *pnow() {
  static char out[40];
  struct timespec now;

  sys_gettime(&now);
  sprintf(out, "%ld.%06ld", now.tv_sec, now.tv_nsec/1000);
  return out;
}
/* Format *t as "sec.microsec" (nanoseconds truncated to microseconds).
   Debug helper only: returns a static buffer, so it is not reentrant. */
char *ptime1(struct timespec *t) {
  static char out[40];
  long usec = t->tv_nsec/1000;

  sprintf(out, "%ld.%06ld", t->tv_sec, usec);
  return out;
}
/* Second "sec.microsec" formatter with its OWN static buffer, so that two
   timestamps can appear in one printf call (debug helper, not reentrant). */
char *ptime2(struct timespec *t) {
  static char out[40];
  long usec = t->tv_nsec/1000;

  sprintf(out, "%ld.%06ld", t->tv_sec, usec);
  return out;
}
#endif |
|
/*+ the level redefinition for the Rate Monotonic +*/ |
typedef struct { |
level_des l; /*+ the standard level descriptor +*/ |
/* statuses used in the level */ |
#define RM_READY MODULE_STATUS_BASE /* ready */ |
#define RM_IDLE MODULE_STATUS_BASE+1 /* idle, waiting for offset/eop */ |
#define RM_WAIT MODULE_STATUS_BASE+2 /* to sleep, waiting for eop */ |
#define RM_ZOMBIE MODULE_STATUS_BASE+3 /* zombie, waiting for eop */ |
|
TIME period[MAX_PROC]; /*+ The task periods; the deadlines are |
stored in the priority field +*/ |
int deadline_timer[MAX_PROC]; |
/*+ The task deadline timers +*/ |
/* task flags */ |
#define RM_FLAG_SPORADIC 1 /* the task is sporadic */ |
#define RM_FLAG_SPOR_LATE 2 /* sporadic task with period overrun */ |
|
int flag[MAX_PROC]; |
/*+ used to manage the JOB_TASK_MODEL and the |
periodicity +*/ |
|
IQUEUE ready; /*+ the ready queue +*/ |
/* the level redefinition for the Earliest Deadline First level */ |
typedef struct { |
level_des l; /* standard level descriptor */ |
IQUEUE ready; /* the ready queue */ |
int flags; /* level flags */ |
bandwidth_t U; /* used bandwidth */ |
|
int flags; /*+ the init flags... +*/ |
int taskflags[MAX_PROC]; /* task flags */ |
TIME period[MAX_PROC]; /* task period */ |
TIME rdeadline[MAX_PROC]; /* task relative deadlines */ |
TIME offset[MAX_PROC]; /* task release offsets */ |
struct timespec release[MAX_PROC]; /* release time of the task */ |
int dl_timer[MAX_PROC]; /* deadline overrun timer */ |
int eop_timer[MAX_PROC]; /* end of period timer */ |
int dl_miss[MAX_PROC]; /* deadline miss counter */ |
int wcet_miss[MAX_PROC]; /* WCET miss counter */ |
int nact[MAX_PROC]; /* number of pending periodic jobs */ |
int nskip[MAX_PROC]; /* number of skipped sporadic jobs */ |
} RM_level_des; |
|
bandwidth_t U; /*+ the used bandwidth +*/ |
|
} RM_level_des; |
static void RM_timer_endperiod(void *par); |
|
|
/* This function is called when a task misses its deadline */ |
|
static void RM_timer_deadline(void *par) |
{ |
PID p = (PID) par; |
RM_level_des *lev; |
struct timespec *temp; |
lev = (RM_level_des *)level_table[proc_table[p].task_level]; |
|
TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0); |
|
if (lev->flags & RM_ENABLE_DL_EXCEPTION) { |
kern_raise(XDEADLINE_MISS,p); |
} else { |
lev->dl_miss[p]++; |
} |
} |
|
|
/* Release (or queue) task, post deadline and endperiod timers. |
The release time is stored in lev->release[p]. */ |
|
/* Release (or queue) task p, then post its deadline and end-of-period
   timers.  The current release time is lev->release[p]; on exit it has
   been advanced by one period.
   Fix: the handle returned by kern_event_post() for the end-of-period
   timer was discarded, while RM_public_message deletes lev->eop_timer[p]
   — the handle is now stored so that cancellation works. */
static void RM_intern_release(PID p, RM_level_des *lev)
{
  struct timespec temp;

  /* post deadline timer */
  if (lev->flags & RM_ENABLE_DL_CHECK) {
    temp = lev->release[p];
    ADDUSEC2TIMESPEC(lev->rdeadline[p], &temp);
    lev->dl_timer[p] = kern_event_post(&temp,RM_timer_deadline,(void *)p);
  }

  /* release or queue next job */
  if (proc_table[p].status == RM_IDLE) {
    /* assign priority (= relative deadline), insert in the ready queue */
    proc_table[p].status = RM_READY;
    *iq_query_priority(p,&lev->ready) = lev->rdeadline[p];
    iq_priority_insert(p,&lev->ready);
#ifdef RM_DEBUG
    rm_printf("At %s: releasing %s\n", pnow(), proc_table[p].name);
#endif
    /* reschedule */
    event_need_reschedule();
  } else {
    /* the previous job is still active: queue the activation */
    lev->nact[p]++;
  }

  /* increase release time */
  ADDUSEC2TIMESPEC(lev->period[p],&lev->release[p]);
  /* post end of period timer, keeping the handle for cancellation */
  lev->eop_timer[p] = kern_event_post(&lev->release[p],RM_timer_endperiod,(void *)p);

  TRACER_LOGEVENT(FTrace_EVT_task_timer,(unsigned short int)proc_table[p].context,(unsigned int)proc_table[p].task_level);
}
|
|
/* First release */ |
|
static void RM_timer_offset(void *par) |
{ |
PID p = (PID) par; |
RM_level_des *lev; |
lev = (RM_level_des *)level_table[proc_table[p].task_level]; |
|
switch (proc_table[p].status) { |
case RM_ZOMBIE: |
/* we finally put the task in the ready queue */ |
proc_table[p].status = FREE; |
iq_insertfirst(p,&freedesc); |
/* and free the allocated bandwidth */ |
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet; |
break; |
RM_intern_release(p, lev); |
} |
|
case RM_IDLE: |
/* tracer stuff */ |
TRACER_LOGEVENT(FTrace_EVT_task_timer,(unsigned short int)proc_table[p].context,(unsigned int)proc_table[p].task_level); |
/* similar to RM_task_activate */ |
temp = iq_query_timespec(p, &lev->ready); |
ADDUSEC2TIMESPEC(lev->period[p], temp); |
proc_table[p].status = RM_READY; |
iq_priority_insert(p,&lev->ready); |
lev->deadline_timer[p] = kern_event_post(temp, |
RM_timer_deadline, |
(void *)p); |
//printk("(d%d idle priority set to %d)",p,proc_table[p].priority ); |
event_need_reschedule(); |
break; |
|
case RM_WAIT: |
/* Without this, the task cannot be reactivated!!! */ |
proc_table[p].status = SLEEP; |
break; |
/* This function is called at the end of the period */ |
|
default: |
/* else, a deadline miss occurred!!! */ |
kern_printf("timer_deadline:AAARRRGGGHHH!!!"); |
kern_raise(XDEADLINE_MISS,p); |
/* End-of-period timer handler (posted by RM_intern_release at the release
   time already advanced by one period).  Frees zombie tasks, re-releases
   periodic tasks, and moves sporadic tasks from RM_WAIT to SLEEP (or marks
   them late if still busy). */
static void RM_timer_endperiod(void *par)
{
  PID p = (PID) par;
  RM_level_des *lev;
  lev = (RM_level_des *)level_table[proc_table[p].task_level];

  if (proc_table[p].status == RM_ZOMBIE) {
    /* put the task in the FREE state */
    proc_table[p].status = FREE;
    iq_insertfirst(p,&freedesc);
    /* free the allocated bandwidth */
    lev->U -= (MAX_BANDWIDTH/lev->rdeadline[p]) * proc_table[p].wcet;
    return;
  }

  if (!(lev->taskflags[p] & RM_FLAG_SPORADIC)) {
    /* if the task is periodic, rerelease it (now or later) */
    RM_intern_release(p, lev);
  } else {
    /* else check if the task is waiting for end of period */
    if (proc_table[p].status == RM_WAIT) {
      proc_table[p].status = SLEEP;
    } else {
      /* the task is still busy. mark it as late */
      lev->taskflags[p] |= RM_FLAG_SPOR_LATE;
    }
  }
}
|
|
/* This function is called when a guest (JOB_TASK_MODEL) task misses its
   deadline: for guest tasks an overrun is always fatal (no miss counter). */

static void RM_timer_guest_deadline(void *par)
{
  PID p = (PID) par;

  kern_printf("AAARRRGGGHHH!!!");
  TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0);
  kern_raise(XDEADLINE_MISS,p);
}
|
/* (CVS merge hunk marker: 159,7 → 235,6) */
/* Scheduling decision for this level: the ready queue is kept ordered by
   priority (relative deadline), so the head of the queue is the task to
   run next (NIL when the queue is empty). */
static PID RM_public_scheduler(LEVEL l)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  PID candidate = iq_query_first(&lev->ready);

  return candidate;
}
|
/* (CVS merge hunk marker: 179,7 → 254,6) */
/* Task creation hook: validate the HARD_TASK_MODEL, perform the
   utilisation-based admission test (if RM_ENABLE_GUARANTEE) and
   initialise the per-task RM state.
   Returns 0 on success, -1 on invalid model or failed admission.
   NOTE(review): the original region interleaved the 1.8 and 1.9 bodies
   (CVS merge residue: duplicated bandwidth formula, old lev->flag[]);
   reconstructed to the 1.9 version. */
static int RM_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  HARD_TASK_MODEL *h;

  if (m->pclass != HARD_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;
  h = (HARD_TASK_MODEL *)m;
  if (!h->wcet || !h->mit) return -1;
  if (h->drel > h->mit) return -1;  /* only D <= T supported */

  /* no relative deadline given -> rate monotonic (D = T);
     otherwise deadline monotonic (D = drel) */
  if (!h->drel) {
    lev->rdeadline[p] = h->mit;
  } else {
    lev->rdeadline[p] = h->drel;
  }

  /* check the free bandwidth... */
  if (lev->flags & RM_ENABLE_GUARANTEE) {
    bandwidth_t b;
    b = (MAX_BANDWIDTH / lev->rdeadline[p]) * h->wcet;

    /* really update lev->U, checking an overflow... */
    if (MAX_BANDWIDTH - lev->U > b) {
      lev->U += b;
    } else {
      return -1;
    }
  }

  /* now we know that m is a valid model */

  if (lev->flags & RM_ENABLE_WCET_EXCEPTION) {
    lev->flags |= RM_ENABLE_WCET_CHECK;
  }
  if (lev->flags & RM_ENABLE_DL_EXCEPTION) {
    lev->flags |= RM_ENABLE_DL_CHECK;
  }

  lev->period[p] = h->mit;
  if (lev->rdeadline[p] == lev->period[p]) {
    /* Ensure that D <= T-eps to make dl_timer trigger before rel_timer */
    lev->rdeadline[p] = lev->period[p] - 1;
  }

  lev->taskflags[p] = 0;

  if (h->periodicity == APERIODIC)
    lev->taskflags[p] |= RM_FLAG_SPORADIC;

  lev->dl_timer[p] = -1;
  lev->eop_timer[p] = -1;

  /* Enable wcet check */
  if (lev->flags & RM_ENABLE_WCET_CHECK) {
    proc_table[p].avail_time = h->wcet;
    proc_table[p].wcet       = h->wcet;
    proc_table[p].control |= CONTROL_CAP; /* turn on measurement */
  }

  lev->offset[p] = h->offset;

  NULL_TIMESPEC(&lev->release[p]);

  return 0; /* OK, also if the task cannot be guaranteed... */
}
|
/* Detach hook: the RM level allocates no dynamic per-task data, so the
   only cleanup is to give back the bandwidth reserved at creation.
   Fix: the original kept BOTH the old period-based and the new
   rdeadline-based decrement (CVS merge residue), subtracting the
   bandwidth twice; creation charges U with MAX_BANDWIDTH/rdeadline*wcet,
   so only the rdeadline-based refund is kept. */
static void RM_public_detach(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  if (lev->flags & RM_ENABLE_GUARANTEE) {
    lev->U -= (MAX_BANDWIDTH / lev->rdeadline[p]) * proc_table[p].wcet;
  }
}
|
/* (CVS merge hunk marker: 235,12 → 328,6) */
/* Dispatch hook: the generic scheduler has already set the task state to
   EXE; here we only remove the task from the RM ready queue.  The
   `nostop' argument is unused by this level. */
static void RM_public_dispatch(LEVEL l, PID p, int nostop)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  // kern_printf("(disp %d)",p);

  /* the task state is set EXE by the scheduler()
     we extract the task from the ready queue
     NB: we can't assume that p is the first task in the queue!!! */
  iq_extract(p, &lev->ready);
}
|
248,51 → 335,63 |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
// kern_printf("(epil %d)",p); |
|
/* check if the wcet is finished... */ |
if ((lev->flags & RM_ENABLE_WCET_CHECK) && proc_table[p].avail_time <= 0) { |
/* if it is, raise a XWCET_VIOLATION exception */ |
kern_raise(XWCET_VIOLATION,p); |
proc_table[p].status = RM_WCET_VIOLATED; |
if (lev->flags & RM_ENABLE_WCET_CHECK) { |
if (proc_table[p].avail_time <= 0) { |
TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,(unsigned short int)proc_table[p].context,0); |
if (lev->flags & RM_ENABLE_WCET_EXCEPTION) { |
kern_raise(XWCET_VIOLATION,p); |
} else { |
proc_table[p].control &= ~CONTROL_CAP; |
lev->wcet_miss[p]++; |
} |
} |
} |
else { |
/* the task has been preempted. it returns into the ready queue... */ |
iq_priority_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
} |
|
/* the task returns to the ready queue */ |
iq_priority_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
|
} |
|
/* Activation hook (task_activate): computes the first release time
   (activation time t + offset) and either releases the task immediately
   or posts the offset timer for a future release.  A task that is not
   SLEEPing is either skipped (counted in nskip) or raises XACTIVATION,
   depending on RM_ENABLE_ACT_EXCEPTION.
   NOTE(review): the original region interleaved the 1.8 and 1.9 bodies
   (CVS merge residue); reconstructed to the 1.9 version. */
static void RM_public_activate(LEVEL l, PID p, struct timespec *t)
{
  struct timespec clocktime;
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  kern_gettime(&clocktime);

  /* check if we are not in the SLEEP state */
  if (proc_table[p].status != SLEEP) {
    if (lev->flags & RM_ENABLE_ACT_EXCEPTION) {
      /* too frequent or wrongful activation: raise exception */
      kern_raise(XACTIVATION,p);
    } else {
      /* skip the sporadic job, but increase a counter */
#ifdef RM_DEBUG
      rm_printf("At %s: activation of %s skipped\n", pnow(), proc_table[p].name);
#endif
      lev->nskip[p]++;
    }
    return;
  }

  /* set the release time to the activation time + offset */
  lev->release[p] = *t;
  ADDUSEC2TIMESPEC(lev->offset[p], &lev->release[p]);

  /* the task is IDLE until its release time arrives */
  proc_table[p].status = RM_IDLE;

  /* Check if release > clocktime. If so, release it later,
     otherwise release it now. */
  if (TIMESPEC_A_GT_B(&lev->release[p], &clocktime)) {
    /* release later */
    kern_event_post(&lev->release[p],RM_timer_offset,(void *)p);
  } else {
    /* release now */
    RM_intern_release(p, lev);
  }
}
|
/* Unblock hook (return from a blocking primitive): the task becomes
   ready again and is re-inserted into the ready queue with its current
   priority.  Unlike activation, the previous state is not checked.
   Fix: the closing brace was lost to a CVS merge marker; restored. */
static void RM_public_unblock(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  /* Insert task in the correct position */
  proc_table[p].status = RM_READY;
  iq_priority_insert(p,&lev->ready);
}
324,31 → 420,105 |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
/* the task has terminated his job before it consume the wcet. All OK! */ |
if (lev->flag[p] & RM_FLAG_SPORADIC) |
proc_table[p].status = RM_WAIT; |
else /* pclass = sporadic_pclass */ |
proc_table[p].status = RM_IDLE; |
switch((long)(m)) { |
/* task_endcycle() */ |
case 0: |
/* if there are no pending jobs */ |
if (lev->nact[p] == 0) { |
/* remove deadline timer, if any */ |
if (lev->dl_timer[p] != -1) { |
kern_event_delete(lev->dl_timer[p]); |
lev->dl_timer[p] = -1; |
} |
if (lev->taskflags[p] & RM_FLAG_SPORADIC) { |
/* sporadic task */ |
if (!(lev->taskflags[p] & RM_FLAG_SPOR_LATE)) { |
proc_table[p].status = RM_WAIT; |
} else { |
/* it's late, move it directly to SLEEP */ |
proc_table[p].status = SLEEP; |
lev->taskflags[p] &= ~RM_FLAG_SPOR_LATE; |
} |
} else { |
/* periodic task */ |
proc_table[p].status = RM_IDLE; |
} |
} else { |
/* we are late / there are pending jobs */ |
lev->nact[p]--; |
*iq_query_priority(p,&lev->ready) = lev->rdeadline[p]; |
iq_priority_insert(p,&lev->ready); |
#ifdef RM_DEBUG |
rm_printf("(Late) At %s: releasing %s\n", |
pnow(), proc_table[p].name); |
#endif |
} |
break; |
|
/* task_sleep() */ |
case 1: |
/* remove deadline timer, if any */ |
if (lev->dl_timer[p] != -1) { |
kern_event_delete(lev->dl_timer[p]); |
lev->dl_timer[p] = -1; |
} |
if (lev->taskflags[p] & RM_FLAG_SPORADIC) { |
/* sporadic task */ |
if (!(lev->taskflags[p] & RM_FLAG_SPOR_LATE)) { |
proc_table[p].status = RM_WAIT; |
} else { |
/* it's late, move it directly to SLEEP */ |
proc_table[p].status = SLEEP; |
lev->taskflags[p] &= ~RM_FLAG_SPOR_LATE; |
} |
} else { |
/* periodic task */ |
if (!(lev->nact[p] > 0)) { |
/* we are on time. go to the RM_WAIT state */ |
proc_table[p].status = RM_WAIT; |
} else { |
/* we are late. delete pending activations and go to SLEEP */ |
lev->nact[p] = 0; |
proc_table[p].status = SLEEP; |
/* remove end of period timer */ |
if (lev->eop_timer[p] != -1) { |
kern_event_delete(lev->eop_timer[p]); |
lev->eop_timer[p] = -1; |
} |
} |
} |
break; |
} |
|
/* we reset the capacity counters... */ |
if (lev->flags & RM_ENABLE_WCET_CHECK) |
proc_table[p].avail_time = proc_table[p].wcet; |
|
if (lev->flags & RM_ENABLE_WCET_CHECK) { |
proc_table[p].control |= CONTROL_CAP; |
} |
proc_table[p].avail_time = proc_table[p].wcet; |
jet_update_endcycle(); /* Update the Jet data... */ |
TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l); |
|
/* when the deadline timer fire, it recognize the situation and set |
correctly all the stuffs (like reactivation, sleep, etc... ) */ |
TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l); |
|
return 0; |
|
} |
|
/* Termination hook (task_end): if an end-of-period timer is still due,
   the task becomes a ZOMBIE and RM_timer_endperiod frees its descriptor
   and bandwidth when the period expires; if the task is already past its
   period (SPOR_LATE) no such timer will fire, so free everything now.
   NOTE(review): the original interleaved the old unconditional-ZOMBIE
   body with the new one (CVS merge residue); reconstructed to 1.9. */
static void RM_public_end(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  if (!(lev->taskflags[p] & RM_FLAG_SPOR_LATE)) {
    /* remove the deadline timer (if any) */
    if (lev->dl_timer[p] != -1) {
      kern_event_delete(lev->dl_timer[p]);
      lev->dl_timer[p] = -1;
    }
    proc_table[p].status = RM_ZOMBIE;
  } else {
    /* no endperiod timer will be fired, free the task now! */
    proc_table[p].status = FREE;
    iq_insertfirst(p,&freedesc);
    /* free the allocated bandwidth */
    lev->U -= (MAX_BANDWIDTH/lev->rdeadline[p]) * proc_table[p].wcet;
  }
}
|
/* Guest-task insertion (private_insert): accept a JOB_TASK_MODEL from
   another level and queue it by its absolute deadline, optionally
   posting a guest deadline timer.
   NOTE(review): the model-validation code was lost to a CVS merge
   marker; reconstructed after the EDF twin module — please verify
   against the repository head. */
static void RM_private_insert(LEVEL l, PID p, TASK_MODEL *m)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  JOB_TASK_MODEL *job;

  if (m->pclass != JOB_PCLASS || (m->level != 0 && m->level != l)) {
    kern_raise(XINVALID_TASK, p);
    return;
  }

  job = (JOB_TASK_MODEL *)m;

  /* Insert task in the correct position */
  *iq_query_timespec(p, &lev->ready) = job->deadline;
  /* THIS IS QUESTIONABLE!! rel deadline? */
  *iq_query_priority(p, &lev->ready) = lev->period[p] = job->period;

  iq_priority_insert(p,&lev->ready);
  proc_table[p].status = RM_READY;

  lev->dl_timer[p] = -1;

  if (!job->noraiseexc) {
    lev->dl_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
                                       RM_timer_guest_deadline,(void *)p);
  }
}
|
405,26 → 574,21 |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
//kern_printf("RM_guest_end: dline timer %d\n",lev->deadline_timer[p]); |
if (proc_table[p].status == RM_READY) |
{ |
iq_extract(p, &lev->ready); |
//kern_printf("(g_end rdy extr)"); |
} |
|
/* we remove the deadline timer, because the slice is finished */ |
if (lev->deadline_timer[p] != NIL) { |
// kern_printf("RM_guest_end: dline timer %d\n",lev->deadline_timer[p]); |
kern_event_delete(lev->deadline_timer[p]); |
lev->deadline_timer[p] = NIL; |
if (lev->dl_timer[p] != -1) { |
kern_event_delete(lev->dl_timer[p]); |
lev->dl_timer[p] = -1; |
} |
|
} |
|
/* Registration functions */ |
|
/*+ Registration function: |
int flags the init flags ... see rm.h +*/ |
|
/* Registration function: |
int flags the init flags ... see rm.h */ |
LEVEL RM_register_level(int flags) |
{ |
LEVEL l; /* the level that we register */ |
462,9 → 626,13 |
|
/* fill the RM descriptor part */ |
for(i=0; i<MAX_PROC; i++) { |
lev->period[i] = 0; |
lev->deadline_timer[i] = -1; |
lev->flag[i] = 0; |
lev->period[i] = 0; |
lev->dl_timer[i] = -1; |
lev->taskflags[i] = 0; |
lev->dl_miss[i] = 0; |
lev->wcet_miss[i] = 0; |
lev->nact[i] = 0; |
lev->nskip[i] = 0; |
} |
|
iq_init(&lev->ready, &freedesc, 0); |
/* (CVS merge hunk marker: 481,3 → 649,35) */
return lev->U; |
} |
|
/* Return the number of pending (queued) activations of task p. */
int RM_get_nact(PID p)
{
  RM_level_des *lev =
    (RM_level_des *)(level_table[proc_table[p].task_level]);

  return lev->nact[p];
}
|
/* Return how many deadlines task p has missed (soft DL handling only). */
int RM_get_dl_miss(PID p)
{
  RM_level_des *lev =
    (RM_level_des *)(level_table[proc_table[p].task_level]);

  return lev->dl_miss[p];
}
|
/* Return how many times task p exhausted its WCET budget (soft handling). */
int RM_get_wcet_miss(PID p)
{
  RM_level_des *lev =
    (RM_level_des *)(level_table[proc_table[p].task_level]);

  return lev->wcet_miss[p];
}
|
/* Return how many activations of (sporadic) task p were skipped. */
int RM_get_nskip(PID p)
{
  RM_level_des *lev =
    (RM_level_des *)(level_table[proc_table[p].task_level]);

  return lev->nskip[p];
}
|