20,11 → 20,11 |
|
/** |
------------ |
CVS : $Id: rm.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: rm.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
|
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
|
This file contains the scheduling module RM (Rate Monotonic) |
41,7 → 41,7 |
**/ |
|
/* |
* Copyright (C) 2000 Paolo Gai |
* Copyright (C) 2000,2002 Paolo Gai |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
71,7 → 71,6 |
|
/*+ Status used in the level +*/ |
#define RM_READY MODULE_STATUS_BASE /*+ - Ready status +*/ |
#define RM_DELAY MODULE_STATUS_BASE+1 /*+ - Delay status +*/ |
#define RM_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/ |
#define RM_WAIT MODULE_STATUS_BASE+3 /*+ to wait the deadline +*/ |
#define RM_IDLE MODULE_STATUS_BASE+4 /*+ to wait the deadline +*/ |
94,7 → 93,7 |
/*+ used to manage the JOB_TASK_MODEL and the |
periodicity +*/ |
|
QUEUE ready; /*+ the ready queue +*/ |
IQUEUE ready; /*+ the ready queue +*/ |
|
int flags; /*+ the init flags... +*/ |
|
103,28 → 102,12 |
} RM_level_des; |
|
|
static char *RM_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
|
switch (status) { |
case RM_READY : return "RM_Ready"; |
case RM_DELAY : return "RM_Delay"; |
case RM_WCET_VIOLATED: return "RM_Wcet_Violated"; |
case RM_WAIT : return "RM_Sporadic_Wait"; |
case RM_IDLE : return "RM_Idle"; |
case RM_ZOMBIE : return "RM_Zombie"; |
default : return "RM_Unknown"; |
} |
} |
|
static void RM_timer_deadline(void *par) |
{ |
PID p = (PID) par; |
RM_level_des *lev; |
struct timespec *temp; |
|
|
lev = (RM_level_des *)level_table[proc_table[p].task_level]; |
|
switch (proc_table[p].status) { |
131,7 → 114,7 |
case RM_ZOMBIE: |
/* we finally put the task in the ready queue */ |
proc_table[p].status = FREE; |
q_insertfirst(p,&freedesc); |
iq_insertfirst(p,&freedesc); |
/* and free the allocated bandwidth */ |
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet; |
break; |
140,12 → 123,11 |
/* tracer stuff */ |
trc_logevent(TRC_INTACTIVATION,&p); |
/* similar to RM_task_activate */ |
TIMESPEC_ASSIGN(&proc_table[p].request_time, |
&proc_table[p].timespec_priority); |
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority); |
temp = iq_query_timespec(p, &lev->ready); |
ADDUSEC2TIMESPEC(lev->period[p], temp); |
proc_table[p].status = RM_READY; |
q_insert(p,&lev->ready); |
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority, |
iq_priority_insert(p,&lev->ready); |
lev->deadline_timer[p] = kern_event_post(temp, |
RM_timer_deadline, |
(void *)p); |
//printk("(d%d idle priority set to %d)",p,proc_table[p].priority ); |
173,112 → 155,16 |
kern_raise(XDEADLINE_MISS,p); |
} |
|
/*+ this function is called when a task finishes its delay +*/ |
static void RM_timer_delay(void *par) |
{ |
PID p = (PID) par; |
RM_level_des *lev; |
|
lev = (RM_level_des *)level_table[proc_table[p].task_level]; |
|
proc_table[p].status = RM_READY; |
q_insert(p,&lev->ready); |
|
proc_table[p].delay_timer = NIL; /* Paranoia */ |
|
event_need_reschedule(); |
} |
|
|
static int RM_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l)) { |
HARD_TASK_MODEL *h = (HARD_TASK_MODEL *)m; |
|
if (h->wcet && h->mit) |
return 0; |
} |
|
return -1; |
} |
|
static int RM_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == JOB_PCLASS || m->pclass == (JOB_PCLASS | l)) |
return 0; |
else |
return -1; |
} |
|
|
static char *onoff(int i) |
{ |
if (i) |
return "On "; |
else |
return "Off"; |
} |
|
static void RM_level_status(LEVEL l) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
PID p = lev->ready; |
|
kern_printf("Wcet Check : %s\n", |
onoff(lev->flags & RM_ENABLE_WCET_CHECK)); |
kern_printf("On-line guarantee : %s\n", |
onoff(lev->flags & RM_ENABLE_GUARANTEE)); |
kern_printf("Used Bandwidth : %u/%u\n", |
lev->U, MAX_BANDWIDTH); |
|
while (p != NIL) { |
if ((proc_table[p].pclass) == JOB_PCLASS) |
kern_printf("Pid: %2d (GUEST)\n", p); |
else |
kern_printf("Pid: %2d Name: %10s %s: %9ld Dline: %9ld.%6ld Stat: %s\n", |
p, |
proc_table[p].name, |
lev->flag[p] & RM_FLAG_SPORADIC ? "MinITime" : "Period ", |
lev->period[p], |
proc_table[p].timespec_priority.tv_sec, |
proc_table[p].timespec_priority.tv_nsec/1000, |
RM_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
} |
|
for (p=0; p<MAX_PROC; p++) |
if (proc_table[p].task_level == l && proc_table[p].status != RM_READY |
&& proc_table[p].status != FREE ) |
kern_printf("Pid: %2d Name: %10s %s: %9ld Dline: %9ld.%6ld Stat: %s\n", |
p, |
proc_table[p].name, |
lev->flag[p] & RM_FLAG_SPORADIC ? "MinITime" : "Period ", |
lev->period[p], |
proc_table[p].timespec_priority.tv_sec, |
proc_table[p].timespec_priority.tv_nsec/1000, |
RM_status_to_a(proc_table[p].status)); |
} |
|
/* The scheduler only gets the first task in the queue */ |
static PID RM_level_scheduler(LEVEL l) |
static PID RM_public_scheduler(LEVEL l) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
/* { // print 4 dbg the ready queue |
PID p= lev->ready; |
kern_printf("(s"); |
while (p != NIL) { |
kern_printf("%d ",p); |
p = proc_table[p].next; |
} |
kern_printf(") "); |
} |
*/ |
return (PID)lev->ready; |
return iq_query_first(&lev->ready); |
} |
|
/* The on-line guarantee is enabled only if the appropriate flag is set... */ |
static int RM_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int RM_public_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
296,16 → 182,19 |
|
} |
|
static int RM_task_create(LEVEL l, PID p, TASK_MODEL *m) |
static int RM_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
/* if the RM_task_create is called, then the pclass must be a |
valid pclass. */ |
HARD_TASK_MODEL *h; |
|
HARD_TASK_MODEL *h = (HARD_TASK_MODEL *)m; |
if (m->pclass != HARD_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
h = (HARD_TASK_MODEL *)m; |
if (!h->wcet || !h->mit) return -1; |
/* now we know that m is a valid model */ |
|
proc_table[p].priority = lev->period[p] = h->mit; |
*iq_query_priority(p, &lev->ready) = lev->period[p] = h->mit; |
|
if (h->periodicity == APERIODIC) |
lev->flag[p] = RM_FLAG_SPORADIC; |
347,7 → 236,7 |
return 0; /* OK, also if the task cannot be guaranteed... */ |
} |
|
static void RM_task_detach(LEVEL l, PID p) |
static void RM_public_detach(LEVEL l, PID p) |
{ |
/* the RM level doesn't introduce any dynamically allocated new field. |
we have only to reset the NO_GUARANTEE FIELD and decrement the allocated |
361,21 → 250,8 |
lev->U -= (MAX_BANDWIDTH / lev->period[p]) * proc_table[p].wcet; |
} |
|
static int RM_task_eligible(LEVEL l, PID p) |
static void RM_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
|
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
|
static void RM_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
// kern_printf("(disp %d)",p); |
383,20 → 259,10 |
/* the task state is set EXE by the scheduler() |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
q_extract(p, &lev->ready); |
|
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
iq_extract(p, &lev->ready); |
} |
|
static void RM_task_epilogue(LEVEL l, PID p) |
static void RM_public_epilogue(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
410,14 → 276,15 |
} |
else { |
/* the task has been preempted. it returns into the ready queue... */ |
q_insert(p,&lev->ready); |
iq_priority_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
} |
} |
|
static void RM_task_activate(LEVEL l, PID p) |
static void RM_public_activate(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
struct timespec *temp; |
|
if (proc_table[p].status == RM_WAIT) { |
kern_raise(XACTIVATION,p); |
432,35 → 299,33 |
|
|
/* see also RM_timer_deadline */ |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
temp = iq_query_timespec(p, &lev->ready); |
kern_gettime(temp); |
ADDUSEC2TIMESPEC(lev->period[p], temp); |
|
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, |
&proc_table[p].request_time); |
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority); |
|
/* Insert task in the correct position */ |
proc_table[p].status = RM_READY; |
q_insert(p,&lev->ready); |
iq_priority_insert(p,&lev->ready); |
|
/* Set the deadline timer */ |
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority, |
lev->deadline_timer[p] = kern_event_post(temp, |
RM_timer_deadline, |
(void *)p); |
} |
|
static void RM_task_insert(LEVEL l, PID p) |
static void RM_public_unblock(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
/* Similar to RM_task_activate, but we don't check in what state |
the task is and we don't set the request_time*/ |
/* Similar to RM_task_activate, |
but we don't check in what state the task is */ |
|
/* Insert task in the correct position */ |
proc_table[p].status = RM_READY; |
q_insert(p,&lev->ready); |
iq_priority_insert(p,&lev->ready); |
} |
|
static void RM_task_extract(LEVEL l, PID p) |
static void RM_public_block(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extract it from the ready queue at the dispatch time. |
473,7 → 338,7 |
*/ |
} |
|
static void RM_task_endcycle(LEVEL l, PID p) |
static int RM_public_message(LEVEL l, PID p, void *m) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
487,14 → 352,17 |
if (lev->flags & RM_ENABLE_WCET_CHECK) |
proc_table[p].avail_time = proc_table[p].wcet; |
|
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
|
/* when the deadline timer fires, it recognizes the situation and sets |
correctly all the stuffs (like reactivation, request_time, etc... ) */ |
correctly all the stuffs (like reactivation, sleep, etc... ) */ |
|
return 0; |
} |
|
static void RM_task_end(LEVEL l, PID p) |
static void RM_public_end(LEVEL l, PID p) |
{ |
// RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
proc_table[p].status = RM_ZOMBIE; |
|
/* When the deadline timer fire, it put the task descriptor in |
501,183 → 369,81 |
the free queue, and free the allocated bandwidth... */ |
} |
|
static void RM_task_sleep(LEVEL l, PID p) |
static void RM_private_insert(LEVEL l, PID p, TASK_MODEL *m) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
JOB_TASK_MODEL *job; |
|
/* the task has terminated his job before it consume the wcet. All OK! */ |
proc_table[p].status = RM_WAIT; |
if (m->pclass != JOB_PCLASS || (m->level != 0 && m->level != l) ) { |
kern_raise(XINVALID_TASK, p); |
return; |
} |
|
/* we reset the capacity counters... */ |
if (lev->flags & RM_ENABLE_WCET_CHECK) |
proc_table[p].avail_time = proc_table[p].wcet; |
job = (JOB_TASK_MODEL *)m; |
|
/* when the deadline timer fire, it recognize the situation and set |
correctly the task state to sleep... */ |
} |
|
static void RM_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
struct timespec wakeuptime; |
// RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
/* equal to RM_task_endcycle */ |
proc_table[p].status = RM_DELAY; |
|
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT, &wakeuptime); |
ADDUSEC2TIMESPEC(usdelay, &wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
RM_timer_delay, |
(void *)p); |
} |
|
/* Guest Functions |
These functions manages a JOB_TASK_MODEL, that is used to put |
a guest task in the RM ready queue. */ |
|
static int RM_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
JOB_TASK_MODEL *job = (JOB_TASK_MODEL *)m; |
|
/* if the RM_guest_create is called, then the pclass must be a |
valid pclass. */ |
|
*iq_query_timespec(p,&lev->ready) = job->deadline; |
*iq_query_priority(p, &lev->ready) = lev->period[p] = job->period; |
|
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, &job->deadline); |
|
lev->deadline_timer[p] = -1; |
|
/* Insert task in the correct position */ |
iq_priority_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
|
if (job->noraiseexc) |
lev->flag[p] = RM_FLAG_NORAISEEXC; |
else |
else { |
lev->flag[p] = 0; |
|
proc_table[p].priority = lev->period[p] = job->period; |
|
/* there is no bandwidth guarantee at this level, it is performed |
by the level that inserts guest tasks... */ |
|
return 0; /* OK, also if the task cannot be guaranteed... */ |
lev->deadline_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready), |
RM_timer_guest_deadline, |
(void *)p); |
} |
} |
|
static void RM_guest_detach(LEVEL l, PID p) |
static void RM_private_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the RM level doesn't introduce any dynamically allocated new field. |
No guarantee is performed on guest tasks... so we don't have to reset |
the NO_GUARANTEE FIELD */ |
} |
|
static void RM_guest_dispatch(LEVEL l, PID p, int nostop) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
/* the task state is set to EXE by the scheduler() |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
q_extract(p, &lev->ready); |
iq_extract(p, &lev->ready); |
} |
|
static void RM_guest_epilogue(LEVEL l, PID p) |
static void RM_private_epilogue(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
/* the task has been preempted. it returns into the ready queue... */ |
q_insert(p,&lev->ready); |
iq_priority_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
} |
|
static void RM_guest_activate(LEVEL l, PID p) |
static void RM_private_extract(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
/* Insert task in the correct position */ |
q_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
|
/* Set the deadline timer */ |
if (!(lev->flag[p] & RM_FLAG_NORAISEEXC)) |
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority, |
RM_timer_guest_deadline, |
(void *)p); |
|
} |
|
static void RM_guest_insert(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
/* Insert task in the correct position */ |
q_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
} |
|
static void RM_guest_extract(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extract it from the ready queue at the dispatch time. |
. the state of the task is set by the calling function |
. the deadline must remain... |
|
So, we do nothing!!! |
*/ |
} |
|
static void RM_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
|
static void RM_guest_end(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
//kern_printf("RM_guest_end: dline timer %d\n",lev->deadline_timer[p]); |
if (proc_table[p].status == RM_READY) |
{ |
q_extract(p, &lev->ready); |
iq_extract(p, &lev->ready); |
//kern_printf("(g_end rdy extr)"); |
} |
else if (proc_table[p].status == RM_DELAY) { |
event_delete(proc_table[p].delay_timer); |
proc_table[p].delay_timer = NIL; /* paranoia */ |
} |
|
/* we remove the deadline timer, because the slice is finished */ |
if (lev->deadline_timer[p] != NIL) { |
// kern_printf("RM_guest_end: dline timer %d\n",lev->deadline_timer[p]); |
event_delete(lev->deadline_timer[p]); |
kern_event_delete(lev->deadline_timer[p]); |
lev->deadline_timer[p] = NIL; |
} |
|
} |
|
static void RM_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
|
static void RM_guest_delay(LEVEL l, PID p, TIME usdelay) |
{ |
struct timespec wakeuptime; |
// RM_level_des *lev = (RM_level_des *)(level_table[l]); |
|
/* equal to RM_task_endcycle */ |
proc_table[p].status = RM_DELAY; |
|
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT, &wakeuptime); |
ADDUSEC2TIMESPEC(usdelay, &wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
RM_timer_delay, |
(void *)p); |
} |
|
|
|
|
/* Registration functions */ |
|
/*+ Registration function: |
int flags the init flags ... see rm.h +*/ |
void RM_register_level(int flags) |
LEVEL RM_register_level(int flags) |
{ |
LEVEL l; /* the level that we register */ |
RM_level_des *lev; /* for readableness only */ |
686,56 → 452,34 |
printk("RM_register_level\n"); |
|
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(RM_level_des)); |
|
/* alloc the space needed for the RM_level_des */ |
lev = (RM_level_des *)kern_alloc(sizeof(RM_level_des)); |
lev = (RM_level_des *)level_table[l]; |
|
printk(" lev=%d\n",(int)lev); |
|
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
|
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, RM_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = RM_LEVEL_CODE; |
lev->l.level_version = RM_LEVEL_VERSION; |
lev->l.private_insert = RM_private_insert; |
lev->l.private_extract = RM_private_extract; |
lev->l.private_dispatch = RM_private_dispatch; |
lev->l.private_epilogue = RM_private_epilogue; |
|
lev->l.level_accept_task_model = RM_level_accept_task_model; |
lev->l.level_accept_guest_model = RM_level_accept_guest_model; |
lev->l.level_status = RM_level_status; |
lev->l.level_scheduler = RM_level_scheduler; |
|
lev->l.public_scheduler = RM_public_scheduler; |
if (flags & RM_ENABLE_GUARANTEE) |
lev->l.level_guarantee = RM_level_guarantee; |
lev->l.public_guarantee = RM_public_guarantee; |
else |
lev->l.level_guarantee = NULL; |
lev->l.public_guarantee = NULL; |
|
lev->l.task_create = RM_task_create; |
lev->l.task_detach = RM_task_detach; |
lev->l.task_eligible = RM_task_eligible; |
lev->l.task_dispatch = RM_task_dispatch; |
lev->l.task_epilogue = RM_task_epilogue; |
lev->l.task_activate = RM_task_activate; |
lev->l.task_insert = RM_task_insert; |
lev->l.task_extract = RM_task_extract; |
lev->l.task_endcycle = RM_task_endcycle; |
lev->l.task_end = RM_task_end; |
lev->l.task_sleep = RM_task_sleep; |
lev->l.task_delay = RM_task_delay; |
lev->l.public_create = RM_public_create; |
lev->l.public_detach = RM_public_detach; |
lev->l.public_end = RM_public_end; |
lev->l.public_dispatch = RM_public_dispatch; |
lev->l.public_epilogue = RM_public_epilogue; |
lev->l.public_activate = RM_public_activate; |
lev->l.public_unblock = RM_public_unblock; |
lev->l.public_block = RM_public_block; |
lev->l.public_message = RM_public_message; |
|
lev->l.guest_create = RM_guest_create; |
lev->l.guest_detach = RM_guest_detach; |
lev->l.guest_dispatch = RM_guest_dispatch; |
lev->l.guest_epilogue = RM_guest_epilogue; |
lev->l.guest_activate = RM_guest_activate; |
lev->l.guest_insert = RM_guest_insert; |
lev->l.guest_extract = RM_guest_extract; |
lev->l.guest_endcycle = RM_guest_endcycle; |
lev->l.guest_end = RM_guest_end; |
lev->l.guest_sleep = RM_guest_sleep; |
lev->l.guest_delay = RM_guest_delay; |
|
/* fill the RM descriptor part */ |
for(i=0; i<MAX_PROC; i++) { |
lev->period[i] = 0; |
743,18 → 487,17 |
lev->flag[i] = 0; |
} |
|
lev->ready = NIL; |
iq_init(&lev->ready, &freedesc, 0); |
lev->flags = flags & 0x07; |
lev->U = 0; |
|
return l; |
} |
|
bandwidth_t RM_usedbandwidth(LEVEL l) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
if (lev->l.level_code == RM_LEVEL_CODE && |
lev->l.level_version == RM_LEVEL_VERSION) |
return lev->U; |
else |
return 0; |
|
return lev->U; |
} |
|