Subversion Repositories shark

Compare Revisions

Rev 696 → Rev 697

/shark/trunk/kernel/modules/elastic.c
54,15 → 54,23
 
typedef struct {

-  struct timespec dline;
-
-  TIME Tmin;
-  TIME Tmax;
-  TIME C;
-  int E;
-  int beta;
-
-  TIME T;
+  /* Task parameters (set/changed by the user) */
+
+  TIME Tmin;              /* The nominal (minimum) period           */
+  TIME Tmax;              /* The maximum tolerable period           */
+  TIME C;                 /* The declared worst-case execution time */
+  int E;                  /* The elasticity coefficient             */
+  int beta;               /* PERIOD_SCALING or WCET_SCALING         */
+
+  /* Task variables (changed by the module) */
+
+  struct timespec dline;  /* The current absolute deadline          */
+  bandwidth_t Umax;       /* The maximum utilization, Umax = C/Tmin */
+  bandwidth_t Umin;       /* The minimum utilization, Umin = C/Tmax */
+  bandwidth_t U;          /* The current utilization                */
+  TIME T;                 /* The current period, T = C/U            */

  int flags;
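The revised descriptor separates the user-supplied parameters from the variables the module recomputes. Utilizations are stored as bandwidth_t values scaled by MAX_BANDWIDTH (one full processor), which is what the Umax = C/Tmin, Umin = C/Tmax and T = C/U comments refer to and what the 64-bit arithmetic later in this diff computes. A stand-alone sketch of that bookkeeping, not taken from the shark sources: the task values are made up and MAX_BANDWIDTH is assumed here to be the 32-bit full-scale constant.

    /* Illustrative only: how the scaled utilization fields relate to C, Tmin,
       Tmax, and to the period T = C/U. */
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BANDWIDTH 0xFFFFFFFFULL      /* assumed: 1.0 of CPU bandwidth */

    int main(void) {
      uint64_t C = 2000, Tmin = 10000, Tmax = 40000;           /* microseconds */

      uint32_t Umax = (uint32_t)((MAX_BANDWIDTH * C) / Tmin);  /* C/Tmin, scaled */
      uint32_t Umin = (uint32_t)((MAX_BANDWIDTH * C) / Tmax);  /* C/Tmax, scaled */

      /* once compression has chosen some U in [Umin, Umax],
         the period is recovered as in the module: T = C/U */
      uint32_t U = (uint32_t)((Umax + (uint64_t)Umin) / 2);
      uint64_t T = (C * MAX_BANDWIDTH) / U;

      printf("Umax=%.3f Umin=%.3f U=%.3f -> T=%llu us\n",
             (double)Umax / MAX_BANDWIDTH, (double)Umin / MAX_BANDWIDTH,
             (double)U / MAX_BANDWIDTH, (unsigned long long)T);
      return 0;
    }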
 
73,7 → 81,7
 
  bandwidth_t U;          /*+ the bandwidth reserved for elastic tasks +*/

-  ELASTIC_task_descr *elist;//[MAX_PROC];
+  ELASTIC_task_descr elist[MAX_PROC];

  LEVEL scheduling_level;
 
83,36 → 91,87
 
} ELASTIC_level_des;
 
-static int ELASTIC_recompute(ELASTIC_level_des *lev) {
-
-  PID i;
-
-  for (i=0; i<MAX_PROC; i++) {
-    if (lev->elist[i].flags & ELASTIC_PRESENT) {
-      lev->elist[i].T = lev->elist[i].Tmax;
-    }
-  }
-
-  return 0;
-
-}
-
-
-static int ELASTIC_check_guarantee(ELASTIC_level_des *lev) {
-
-  PID i;
-
-  for (i=0; i<MAX_PROC; i++) {
-    if (lev->elist[i].flags & ELASTIC_PRESENT) {
-      lev->elist[i].T = lev->elist[i].Tmax;
-    }
-  }
-
-  return 0;
-
-}
+/* Checks feasability and computes new utilizations for the task set */
+
+static int ELASTIC_compress(ELASTIC_level_des *lev) {
+
+  PID i;
+  ELASTIC_task_descr *t;
+  int ok;
+
+  ext_bandwidth_t Umin;  // minimum utilization
+  ext_bandwidth_t Umax;  // nominal (maximum) utilization of compressable tasks
+  ext_bandwidth_t Uf;    // amount of non-compressable utilization
+  int Ev;                // sum of elasticity among compressable tasks
+
+  Umin = 0;
+  Umax = 0;
+
+  for (i=0; i<MAX_PROC; i++) {
+    t = &lev->elist[i];
+    if (t->flags & ELASTIC_PRESENT) {
+      if (t->E == 0) {
+        Umin += t->U;
+      } else {
+        Umin += t->Umin;
+        t->U = t->Umax;  // reset previous saturations
+      }
+    }
+  }
+
+  if (Umin > lev->U) return -1;  // NOT FEASIBLE
+
+  do {
+    Uf = 0;
+    Ev = 0;
+    Umax = 0;
+
+    for (i=0; i<MAX_PROC; i++) {
+      t = &lev->elist[i];
+      if (t->flags & ELASTIC_PRESENT) {
+        if (t->E == 0 || t->U == t->Umin) {
+          Uf += t->U;
+        } else {
+          Ev += t->E;
+          Umax += t->Umax;
+        }
+      }
+    }
+
+    ok = 1;
+
+    for (i=0; i<MAX_PROC; i++) {
+      t = &lev->elist[i];
+      if (t->flags & ELASTIC_PRESENT) {
+        if (t->E > 0 && t->U > t->Umin) {
+          t->U = t->Umax - (Umax - lev->U + Uf) * t->E / Ev;
+          if (t->U < t->Umin) {
+            t->U = t->Umin;
+            ok = 0;
+          }
+          t->T = ((long long)t->C * (long long)MAX_BANDWIDTH) / t->U;
+        }
+      }
+    }
+
+  } while (ok == 0);
+
+  cprintf("New periods: ");
+  for (i=0; i<MAX_PROC; i++) {
+    t = &lev->elist[i];
+    if (t->flags & ELASTIC_PRESENT) {
+      cprintf("%s:%d ", proc_table[i].name, t->T);
+    }
+  }
+  cprintf("\n");
+
+  return 0;  // FEASIBLE
+}
 
 
static void ELASTIC_activation(ELASTIC_level_des *lev,
                               PID p,
                               struct timespec *acttime)
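The new ELASTIC_compress() above is the compression step of the elastic task model: the utilization that exceeds the reserved bandwidth lev->U is taken away from the compressible tasks in proportion to their elasticity E, and any task that would fall below its Umin is saturated there and the distribution recomputed, which is what the do/while loop and the ok flag implement. A stand-alone sketch of the same iteration, using plain doubles and invented task values instead of the kernel's fixed-point types:

    /* Not from the shark sources: compress three example tasks into Ud. */
    #include <stdio.h>

    #define NTASKS 3

    int main(void) {
      double Umax[NTASKS] = {0.40, 0.30, 0.30}; /* nominal utilizations, C/Tmin */
      double Umin[NTASKS] = {0.20, 0.10, 0.30}; /* minimum utilizations, C/Tmax */
      double E[NTASKS]    = {1.0,  2.0,  0.0};  /* elasticity; 0 = not compressible */
      double U[NTASKS];
      double Ud = 0.85;                         /* bandwidth reserved for the level */
      double sumUmin = 0.0;
      int i, ok;

      /* feasibility: the minimum utilizations must fit in Ud */
      for (i = 0; i < NTASKS; i++) {
        sumUmin += (E[i] == 0.0) ? Umax[i] : Umin[i];
        U[i] = Umax[i];                         /* reset previous saturations */
      }
      if (sumUmin > Ud) { printf("not feasible\n"); return 1; }

      do {
        double Uf = 0.0, Ev = 0.0, Usum = 0.0;
        for (i = 0; i < NTASKS; i++) {
          if (E[i] == 0.0 || U[i] == Umin[i]) Uf += U[i];   /* fixed share */
          else { Ev += E[i]; Usum += Umax[i]; }             /* still compressible */
        }
        ok = 1;
        for (i = 0; i < NTASKS; i++) {
          if (E[i] > 0.0 && U[i] > Umin[i]) {
            U[i] = Umax[i] - (Usum - Ud + Uf) * E[i] / Ev;  /* proportional squeeze */
            if (U[i] < Umin[i]) { U[i] = Umin[i]; ok = 0; } /* saturate and retry */
          }
        }
      } while (!ok);

      for (i = 0; i < NTASKS; i++) printf("U[%d] = %.3f\n", i, U[i]);
      return 0;
    }

With these numbers the non-elastic task keeps 0.30 while the two elastic tasks settle at 0.35 and 0.20, exactly filling the 0.85 of reserved bandwidth.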
134,7 → 193,7
}
 
static void ELASTIC_timer_act(void *arg) {
 
  PID p = (PID)(arg);
  ELASTIC_level_des *lev;
  struct timespec acttime;
142,12 → 201,13
#ifdef ELASTIC_DEBUG
  printk("(ELASTIC:Timer:%d)",p);
#endif

  kern_gettime(&acttime);

  lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
-  ELASTIC_activation(lev, p, &acttime);
+
+  ELASTIC_activation(lev, p, &acttime);

  event_need_reschedule();

  /* Next activation */
168,12 → 228,35
}
}
 
+/* Checks if the current task set is feasible. Returns 1=yes, 0=no */
+static int ELASTIC_feasible(ELASTIC_level_des *lev)
+{
+  ext_bandwidth_t Umin = 0;
+  PID i;
+
+  for (i=0; i<MAX_PROC; i++) {
+    if (lev->elist[i].flags & ELASTIC_PRESENT) {
+      if (lev->elist[i].E == 0) {
+        /* The task is not elastic. Use current utilization U */
+        Umin += lev->elist[i].U;
+      } else {
+        /* The task is elastic. Use minimum utilization Umin */
+        Umin += lev->elist[i].Umin;
+      }
+    }
+  }
+
+  if (Umin > lev->U) {
+    return 0;
+  } else {
+    return 1;
+  }
+}
 
 
static int ELASTIC_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
  ELASTIC_TASK_MODEL *elastic = (ELASTIC_TASK_MODEL *)m;
-  ext_bandwidth_t Umin = 0;

  if (m->pclass != ELASTIC_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;
191,38 → 274,27
  lev->elist[p].E = elastic->E;
  lev->elist[p].beta = elastic->beta;

-  lev->elist[p].T = elastic->Tmin;
+  lev->elist[p].Umax = ((long long)MAX_BANDWIDTH * (long long)elastic->C)
+                                                  / elastic->Tmin;
+  lev->elist[p].Umin = ((long long)MAX_BANDWIDTH * (long long)elastic->C)
+                                                  / elastic->Tmax;

-  /* check if new task can be admitted: */
-  /* compute minimum consumed bandwidth of all tasks */
-  Umin = 0;
-  for (i=0; i<MAX_PROC; i++) {
-    if (lev->elist[i].flags & ELASTIC_PRESENT) {
-      if (lev->elist[i].E == 0) {
-        /* The task is not elastic. Use current period T */
-        Umin += (MAX_BANDWIDTH / lev->elist[i].T) * lev->elist[i].C;
-      } else {
-        /* The task is elastic. Use maximum period Tmax */
-        Umin += (MAX_BANDWIDTH / lev->elist[i].Tmax) * lev->elist[i].C;
-      }
-    }
-  }
-  if (Umin > lev->U) {
-    /* failed to add task */
+  lev->elist[p].U = lev->elist[p].Umax;
+  lev->elist[p].T = lev->elist[p].Tmin;
+
+  if (ELASTIC_compress(lev) == -1) {
    lev->elist[p].flags = ELASTIC_EMPTY_SLOT;
    return -1;
  }

-  ELASTIC_recompute(lev);
-
  proc_table[p].avail_time = elastic->C;
  proc_table[p].wcet = elastic->C;
  proc_table[p].control |= CONTROL_CAP;

-  return 0; /* OK, also if the task cannot be guaranteed... */
+  return 0;
}
 
 
static void ELASTIC_public_detach(LEVEL l, PID p)
{
ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
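A note on the admission arithmetic in ELASTIC_public_create() above: the Rev 696 test divided MAX_BANDWIDTH by the period before multiplying by C, which truncates early, while the Rev 697 code widens to 64 bits and divides last, presumably to keep full precision. A small stand-alone comparison, not from the sources, with arbitrary example values and MAX_BANDWIDTH assumed to be the 32-bit full-scale constant:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BANDWIDTH 0xFFFFFFFFu   /* assumed full-scale bandwidth value */

    int main(void) {
      uint32_t C = 3000, T = 7000;      /* WCET and period, e.g. in microseconds */

      uint32_t old_way = (MAX_BANDWIDTH / T) * C;                       /* divide first: truncates early */
      uint32_t new_way = (uint32_t)(((uint64_t)MAX_BANDWIDTH * C) / T); /* widen, divide last */

      printf("old: %u  new: %u  exact: %.1f\n",
             (unsigned)old_way, (unsigned)new_way,
             (double)MAX_BANDWIDTH * C / T);
      return 0;
    }

With these values the early division loses roughly 2300 units of scaled bandwidth per task, which adds up across the task set when testing admission against lev->U.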
396,8 → 468,6
  lev->l.public_block = ELASTIC_public_block;
  lev->l.public_message = ELASTIC_public_message;

-  lev->elist = kern_alloc(MAX_PROC * sizeof(ELASTIC_task_descr));
-
  /* fill the ELASTIC task descriptor part */
  for (i=0; i<MAX_PROC; i++) {
    NULL_TIMESPEC(&(lev->elist[i].dline));
404,6 → 474,7
    lev->elist[i].Tmin = 0;
    lev->elist[i].Tmax = 0;
    lev->elist[i].T = 0;
+    lev->elist[i].U = 0;
    lev->elist[i].C = 0;
    lev->elist[i].E = 0;
    lev->elist[i].beta = 0;