Subversion Repositories shark

Compare Revisions

Ignore whitespace Rev 706 → Rev 707

/shark/trunk/kernel/modules/elastic.c
45,13 → 45,40
 
#include <tracer.h>
 
#define ELASTIC_EMPTY_SLOT 0
 
/* Task flags */
 
#define ELASTIC_PRESENT 1
#define ELASTIC_JOB_PRESENT 2
 
/* Task statuses */
 
#define ELASTIC_IDLE APER_STATUS_BASE
 
//#define ELASTIC_DEBUG
 
#define ELASTIC_DEBUG
 
#ifdef ELASTIC_DEBUG
/* Format the current kernel time as "sec.usec" into a static buffer.
   Not reentrant: every call overwrites the same buffer (debug use only). */
char *pnow() {
  static char text[40];
  struct timespec now;

  kern_gettime(&now);
  sprintf(text, "%ld.%06ld", now.tv_sec, now.tv_nsec/1000);
  return text;
}
/* Render *t as "sec.usec" (microsecond resolution) into a static buffer.
   Not reentrant; a second value in the same printf must use ptime2(). */
char *ptime1(struct timespec *t) {
  static char text[40];
  long usec = t->tv_nsec/1000;

  sprintf(text, "%ld.%06ld", t->tv_sec, usec);
  return text;
}
/* Same contract as ptime1() but with its own static buffer, so one printf
   can show two different timestamps.  Not reentrant. */
char *ptime2(struct timespec *t) {
  static char text[40];

  sprintf(text, "%ld.%06ld", t->tv_sec, t->tv_nsec/1000);
  return text;
}
#endif
 
 
typedef struct {
 
/* Task parameters (set/changed by the user) */
64,13 → 91,16
 
/* Task variables (changed by the module) */
 
struct timespec dline; /* The current absolute deadline */
struct timespec release; /* The current activation time */
struct timespec dline; /* The current absolute deadline */
int dltimer; /* Deadline timer handle */
ext_bandwidth_t Umax; /* The maximum utilization, Umax = C/Tmin */
ext_bandwidth_t Umin; /* The minimum utilization, Umin = C/Tmax */
 
ext_bandwidth_t U; /* The current utilization */
TIME T; /* The current period, T = C/U */
ext_bandwidth_t U; /* New assigned utilization */
ext_bandwidth_t oldU; /* Old utilization */
TIME T; /* The current period, T = C/U */
 
int flags;
 
92,12 → 122,58
} ELASTIC_level_des;
 
 
/* Checks feasability and computes new utilizations for the task set */
/* Release a new job of task p at time *acttime: record the release time,
   derive the absolute deadline from the current period T, recharge the
   execution budget and hand the job to the master scheduling level. */
static void ELASTIC_activation(ELASTIC_level_des *lev, PID p,
struct timespec *acttime)
{
  ELASTIC_task_descr *tdes = &lev->elist[p];
  JOB_TASK_MODEL jmodel;

  /* New release time; absolute deadline = release + T (T in usec) */
  tdes->release = *acttime;
  tdes->dline = *acttime;
  ADDUSEC2TIMESPEC(tdes->T, &tdes->dline);

#ifdef ELASTIC_DEBUG
  cprintf("At %s: activating %s; rel=%s; dl=%s\n", pnow(), proc_table[p].name,
          ptime1(&tdes->release), ptime2(&tdes->dline));
#endif

  /* Recharge the budget to the worst-case execution time C */
  proc_table[p].avail_time = tdes->C;
  proc_table[p].wcet = tdes->C;

  /* Insert the job into the master level with the computed deadline */
  job_task_default_model(jmodel, tdes->dline);
  level_table[lev->scheduling_level]->
    private_insert(lev->scheduling_level, p, (TASK_MODEL *)&jmodel);
}
 
 
/* Deadline-timer handler: when a task's absolute deadline expires, the
   next job is released using that deadline as its activation time, a
   reschedule is requested, and the timer is re-armed for the new deadline
   computed by ELASTIC_activation(). */
static void ELASTIC_timer_act(void *arg) {
  PID p = (PID)(arg);
  ELASTIC_level_des *lev =
    (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  ELASTIC_task_descr *tdes = &lev->elist[p];

  /* The expired deadline becomes the new activation time */
  ELASTIC_activation(lev, p, &tdes->dline);

  event_need_reschedule();

  /* Re-arm the timer at the freshly computed deadline */
  tdes->dltimer = kern_event_post(&tdes->dline, ELASTIC_timer_act, (void *)(p));
}
 
 
/* Check feasability and compute new utilizations for the task set */
 
static int ELASTIC_compress(ELASTIC_level_des *lev) {
 
PID i;
ELASTIC_task_descr *t;
ELASTIC_task_descr *et;
int ok;
ext_bandwidth_t Umin; // minimum utilization
106,20 → 182,21
ext_bandwidth_t Uf; // amount of non-compressable utilization
int Ev; // sum of elasticity among compressable tasks
 
JOB_TASK_MODEL job;
 
Umin = 0;
Umax = 0;
 
for (i=0; i<MAX_PROC; i++) {
t = &lev->elist[i];
if (t->flags & ELASTIC_PRESENT) {
if (t->E == 0) {
Umin += t->U;
Umax += t->U;
et = &lev->elist[i];
if (et->flags & ELASTIC_PRESENT) {
if (et->E == 0) {
Umin += et->U;
Umax += et->U;
} else {
Umin += t->Umin;
Umax += t->Umax;
t->U = t->Umax; // reset previous saturations
t->T = ((long long)t->C * (long long)MAX_BANDWIDTH) / t->U;
Umin += et->Umin;
Umax += et->Umax;
et->U = et->Umax; // reset previous saturations (if any)
}
}
}
134,13 → 211,13
Umax = 0;
 
for (i=0; i<MAX_PROC; i++) {
t = &lev->elist[i];
if (t->flags & ELASTIC_PRESENT) {
if (t->E == 0 || t->U == t->Umin) {
Uf += t->U;
et = &lev->elist[i];
if (et->flags & ELASTIC_PRESENT) {
if (et->E == 0 || et->U == et->Umin) {
Uf += et->U;
} else {
Ev += t->E;
Umax += t->Umax;
Ev += et->E;
Umax += et->Umax;
}
}
}
148,15 → 225,14
ok = 1;
for (i=0; i<MAX_PROC; i++) {
t = &lev->elist[i];
if (t->flags & ELASTIC_PRESENT) {
if (t->E > 0 && t->U > t->Umin) {
t->U = t->Umax - (Umax - lev->U + Uf) * t->E / Ev;
if (t->U < t->Umin) {
t->U = t->Umin;
et = &lev->elist[i];
if (et->flags & ELASTIC_PRESENT) {
if (et->E > 0 && et->U > et->Umin) {
et->U = et->Umax - (Umax - lev->U + Uf) * et->E / Ev;
if (et->U < et->Umin) {
et->U = et->Umin;
ok = 0;
}
t->T = ((long long)t->C * (long long)MAX_BANDWIDTH) / t->U;
}
}
}
163,14 → 239,52
 
} while (ok == 0);
 
// Increase periods of compressed tasks IMMEDIATELY.
// The other ones will be changed at their next activation
 
for (i=0; i<MAX_PROC; i++) {
et = &lev->elist[i];
if (et->flags & ELASTIC_PRESENT) {
if (et->U != et->oldU) {
/* Utilization has been changed. Compute new period */
et->T = ((long long)et->C * (long long)MAX_BANDWIDTH) / et->U;
}
if (et->U < et->oldU) {
/* Task has been compressed. Change its deadline NOW! */
if (et->flags & ELASTIC_JOB_PRESENT) {
/* Remove job from level and change its deadline */
level_table[lev->scheduling_level]->
private_extract(lev->scheduling_level, i);
}
/* Compute new deadline */
et->dline = et->release;
ADDUSEC2TIMESPEC(et->T, &et->dline);
if (et->dltimer != -1) {
/* Delete old deadline timer, post new one */
kern_event_delete(et->dltimer);
et->dltimer = kern_event_post(&et->dline, ELASTIC_timer_act,(void *)(i));
}
if (et->flags & ELASTIC_JOB_PRESENT) {
/* Reinsert job */
job_task_default_model(job, et->dline);
level_table[lev->scheduling_level]->
private_insert(lev->scheduling_level, i, (TASK_MODEL *)&job);
}
}
et->oldU = et->U; /* Update oldU */
}
}
 
#ifdef ELASTIC_DEBUG
cprintf("New periods: ");
for (i=0; i<MAX_PROC; i++) {
t = &lev->elist[i];
if (t->flags & ELASTIC_PRESENT) {
cprintf("%s:%d ", proc_table[i].name, t->T);
et = &lev->elist[i];
if (et->flags & ELASTIC_PRESENT) {
cprintf("%s:%d ", proc_table[i].name, (int)et->T);
}
}
cprintf("\n");
#endif
 
return 0; // FEASIBLE
 
177,49 → 291,6
}
 
 
/* (Removed side of the rev 706 -> 707 diff: the pre-707 version of
   ELASTIC_activation.)  Computes the job's absolute deadline as
   acttime + T, recharges the execution budget to C, and inserts the job
   into the master scheduling level.  Unlike the rev 707 version it does
   not store the release time. */
static void ELASTIC_activation(ELASTIC_level_des *lev,
PID p,
struct timespec *acttime)
{
JOB_TASK_MODEL job;

/* Job deadline */
TIMESPEC_ASSIGN(&(lev->elist[p].dline),acttime);
ADDUSEC2TIMESPEC(lev->elist[p].T,&(lev->elist[p].dline));

/* Recharge the execution budget to the worst-case execution time C */
proc_table[p].avail_time = lev->elist[p].C;
proc_table[p].wcet = lev->elist[p].C;

/* Job insertion */
job_task_default_model(job, lev->elist[p].dline);
level_table[ lev->scheduling_level ]->
private_insert(lev->scheduling_level, p, (TASK_MODEL *)&job);

}
 
/* (Removed side of the rev 706 -> 707 diff: the pre-707 timer handler.)
   Reads the current kernel time, re-activates the task at that time,
   requests a reschedule and re-posts the timer at the task's new absolute
   deadline.  Note the timer handle returned by kern_event_post is
   discarded here; the rev 707 version stores it in dltimer. */
static void ELASTIC_timer_act(void *arg) {

PID p = (PID)(arg);
ELASTIC_level_des *lev;
struct timespec acttime;
#ifdef ELASTIC_DEBUG
printk("(ELASTIC:Timer:%d)",p);
#endif

/* The current time is used as the new activation time */
kern_gettime(&acttime);

lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];

ELASTIC_activation(lev, p, &acttime);

event_need_reschedule();

/* Next activation */
kern_event_post(&(lev->elist[p].dline), ELASTIC_timer_act, (void *)(p));
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
static int ELASTIC_public_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
240,6 → 311,7
{
ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
ELASTIC_TASK_MODEL *elastic = (ELASTIC_TASK_MODEL *)m;
ELASTIC_task_descr *et = &lev->elist[p];
 
if (m->pclass != ELASTIC_PCLASS) return -1;
if (m->level != 0 && m->level != l) return -1;
247,29 → 319,23
if (elastic->C == 0) return -1;
if (elastic->Tmin > elastic->Tmax) return -1;
if (elastic->Tmax == 0) return -1;
if (elastic->Tmin == 0) return -1;
 
lev->elist[p].flags |= ELASTIC_PRESENT;
NULL_TIMESPEC(&(et->dline));
et->Tmin = elastic->Tmin;
et->Tmax = elastic->Tmax;
et->C = elastic->C;
et->E = elastic->E;
et->beta = elastic->beta;
 
NULL_TIMESPEC(&(lev->elist[p].dline));
lev->elist[p].Tmin = elastic->Tmin;
lev->elist[p].Tmax = elastic->Tmax;
lev->elist[p].C = elastic->C;
lev->elist[p].E = elastic->E;
lev->elist[p].beta = elastic->beta;
et->Umax = ((long long)MAX_BANDWIDTH * (long long)elastic->C) / elastic->Tmin;
et->Umin = ((long long)MAX_BANDWIDTH * (long long)elastic->C) / elastic->Tmax;
 
lev->elist[p].Umax = ((long long)MAX_BANDWIDTH * (long long)elastic->C)
/ elastic->Tmin;
lev->elist[p].Umin = ((long long)MAX_BANDWIDTH * (long long)elastic->C)
/ elastic->Tmax;
et->U = et->Umax;
et->oldU = 0;
et->T = et->Tmin;
et->dltimer = -1;
 
lev->elist[p].U = lev->elist[p].Umax;
lev->elist[p].T = lev->elist[p].Tmin;
 
if (ELASTIC_compress(lev) == -1) {
lev->elist[p].flags = ELASTIC_EMPTY_SLOT;
return -1;
}
 
proc_table[p].avail_time = elastic->C;
proc_table[p].wcet = elastic->C;
proc_table[p].control |= CONTROL_CAP;
280,13 → 346,13
 
/* Detach hook: currently a stub -- no per-task state is released here.
   NOTE(review): both the live and the commented-out 'lev' declarations
   appear because this is a diff view (rev 706 vs 707); in the new
   revision only the commented-out line remains. */
static void ELASTIC_public_detach(LEVEL l, PID p)
{
ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
//ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);

}
 
static int ELASTIC_public_eligible(LEVEL l, PID p)
{
ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
//ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
 
return 0;
 
296,10 → 362,6
{
ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
 
#ifdef ELASTIC_DEBUG
printk("(ELASTIC:Dsp:%d)",p);
#endif
 
level_table[ lev->scheduling_level ]->
private_dispatch(lev->scheduling_level,p,nostop);
 
309,19 → 371,16
{
ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
 
#ifdef ELASTIC_DEBUG
printk("(ELASTIC:Epi:%d)",p);
#endif
 
/* check if the wcet is finished... */
if (proc_table[p].avail_time <= 0) {
TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,(unsigned short int)proc_table[p].context,0);
TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,
(unsigned short int)proc_table[p].context,0);
kern_raise(XWCET_VIOLATION,p);
}
 
level_table[ lev->scheduling_level ]->
level_table[lev->scheduling_level]->
private_epilogue(lev->scheduling_level,p);
 
}
328,23 → 387,28
 
/* Activate task p at time *t: mark it present, run the elastic
   compression to (re)assign utilizations, release the first job and post
   the deadline timer.  If compression fails (task set infeasible) the
   task is left inactive. */
static void ELASTIC_public_activate(LEVEL l, PID p, struct timespec *t)
{

ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
ELASTIC_task_descr *et = &lev->elist[p];

#ifdef ELASTIC_DEBUG
printk("(ELASTIC:Act:%d)", p);
#endif
/* check if we are not in the SLEEP state */
if (proc_table[p].status != SLEEP) {
return;
}

/* Mark present, then verify feasibility; roll back the flag on failure */
et->flags |= ELASTIC_PRESENT;
if (ELASTIC_compress(lev) == -1) {
et->flags &= ~ELASTIC_PRESENT;
#ifdef ELASTIC_DEBUG
cprintf("ELASTIC_public_activate: compression failed!\n");
#endif
return;
}

/* Release the first job at *t */
ELASTIC_activation(lev,p,t);

/* Next activation */
/* NOTE(review): two kern_event_post calls appear below because this is a
   diff view -- the first is the removed rev 706 line, the second the
   rev 707 replacement that stores the timer handle.  Only one timer
   should be posted; verify against the checked-in file. */
kern_event_post(&(lev->elist[p].dline), ELASTIC_timer_act, (void *)(p));
et->dltimer = kern_event_post(&et->dline, ELASTIC_timer_act, (void *)(p));

}
 
362,9 → 426,11
/* Block hook: extract the task's job from the master scheduling level and
   clear its job-present flag. */
static void ELASTIC_public_block(LEVEL l, PID p)
{
ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
ELASTIC_task_descr *et = &lev->elist[p];

/* NOTE(review): the duplicated 'level_table[...]->'' line below is diff
   residue (old rev 706 spelling vs new rev 707 spelling of the same
   statement); only one should exist in the checked-in file. */
level_table[ lev->scheduling_level ]->
level_table[lev->scheduling_level]->
private_extract(lev->scheduling_level,p);
et->flags &= ~ELASTIC_JOB_PRESENT;

}
 
371,17 → 437,12
static int ELASTIC_public_message(LEVEL l, PID p, void *m)
{
ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
struct timespec acttime;
 
switch((long)(m)) {
 
case (long)(NULL):
 
#ifdef ELASTIC_DEBUG
printk("(ELASTIC:EndCyc:%d)",p);
#endif
 
level_table[ lev->scheduling_level ]->
level_table[lev->scheduling_level]->
private_extract(lev->scheduling_level,p);
 
proc_table[p].status = ELASTIC_IDLE;
393,10 → 454,6
 
case 1:
 
#ifdef ELASTIC_DEBUG
printk("(ELASTIC:Disable:%d)",p);
#endif
 
level_table[ lev->scheduling_level ]->
private_extract(lev->scheduling_level,p);
 
461,7 → 518,7
lev->elist[i].C = 0;
lev->elist[i].E = 0;
lev->elist[i].beta = 0;
lev->elist[i].flags = ELASTIC_EMPTY_SLOT;
lev->elist[i].flags = 0;
}
 
lev->U = U;
470,8 → 527,43
 
lev->current_level = l;
 
lev->flags = flags;
lev->flags = 0;
 
return l;
}
 
 
/* Force the period of task p to a given value */
 
/* Force the period of task p to a given value (microseconds).
 *
 * The task's elasticity E is temporarily zeroed so that ELASTIC_compress()
 * treats the requested utilization U = C/period as fixed; E is restored
 * once compression is done.
 *
 * Returns 0 on success, -1 if period is 0 or the resulting task set is
 * infeasible (in which case the previous E and U are restored).
 */
int ELASTIC_set_period(PID p, TIME period) {

  ELASTIC_level_des *lev;
  ELASTIC_task_descr *et;
  SYS_FLAGS f;
  int saveE;
  ext_bandwidth_t saveU;

  /* A zero period would cause a division by zero below */
  if (period == 0) return -1;

  f = kern_fsave();

  lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  et = &lev->elist[p];

  saveE = et->E;
  saveU = et->U;

  et->E = 0; /* set elasticity to zero to make correct compression */
  et->U = ((long long)MAX_BANDWIDTH * (long long)et->C)/period;
  if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
    cprintf("ELASTIC_set_period failed: could not compress\n");
#endif
    /* Infeasible: roll back to the previous parameters */
    et->E = saveE;
    et->U = saveU;
    kern_frestore(f);
    return -1;
  }

  et->E = saveE; /* Restore E when compression is done */
  kern_frestore(f);
  return 0;
}
/shark/trunk/include/modules/elastic.h
47,5 → 47,7
 
LEVEL ELASTIC_register_level(int flags, LEVEL master, ext_bandwidth_t U);
 
int ELASTIC_set_period(PID p, TIME period);
 
__END_DECLS
#endif