/* (repository-viewer residue, kept as a comment so the file compiles:
   Rev 683 | Blame | Compare with Previous | Last modification | View Log | RSS feed) */
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Giacomo Guidi <giacomo@gandalf.sssup.it>
* Mauro Marinoni
* Anton Cervin
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <stdlib.h>
#include <modules/elastic.h>
#include <tracer.h>
#define ELASTIC_EMPTY_SLOT 0
#define ELASTIC_PRESENT 1
#define ELASTIC_IDLE APER_STATUS_BASE
//#define ELASTIC_DEBUG
/* Per-task state kept by the ELASTIC module, one slot per PID. */
typedef struct {
struct timespec dline; /* absolute deadline of the current job */
TIME Tmin; /* minimum (desired) period, in usec */
TIME Tmax; /* maximum acceptable period, in usec */
TIME C; /* worst-case execution time per job */
int E; /* elastic coefficient; 0 means the task is rigid */
int beta; /* elastic compression parameter */
TIME T; /* currently assigned period (Tmin <= T <= Tmax) */
int flags; /* ELASTIC_EMPTY_SLOT or ELASTIC_PRESENT */
} ELASTIC_task_descr;
/* Level descriptor of the ELASTIC scheduling module. */
typedef struct {
level_des l; /*+ the standard level descriptor +*/
bandwidth_t U; /*+ the bandwidth reserved for elastic tasks +*/
ELASTIC_task_descr *elist;//[MAX_PROC]; /* per-PID task table, kern_alloc'd at registration */
LEVEL scheduling_level; /* master level jobs are inserted into (e.g. EDF) */
LEVEL current_level; /* this level's own index in level_table */
int flags; /* registration flags (e.g. ELASTIC_ENABLE_GUARANTEE) */
} ELASTIC_level_des;
/* Recompute the period assignment of all registered elastic tasks.
 * Current policy: every present task gets its maximum period Tmax.
 * Always returns 0 (success). */
static int ELASTIC_recompute(ELASTIC_level_des *lev)
{
  PID idx = 0;

  while (idx < MAX_PROC) {
    ELASTIC_task_descr *td = &lev->elist[idx];
    if (td->flags & ELASTIC_PRESENT)
      td->T = td->Tmax;
    idx++;
  }

  return 0;
}
/* Check whether the current task set is feasible.
 * NOTE(review): this body is a verbatim copy of ELASTIC_recompute() — it
 * only resets each present task's period to Tmax and performs no actual
 * guarantee test. It looks like an unfinished placeholder; confirm the
 * intended admission-test algorithm before relying on it. */
static int ELASTIC_check_guarantee(ELASTIC_level_des *lev) {
PID i;
for (i=0; i<MAX_PROC; i++) {
if (lev->elist[i].flags & ELASTIC_PRESENT) {
lev->elist[i].T = lev->elist[i].Tmax;
}
}
return 0;
}
/* Release a new job of task p at time acttime: set the job's absolute
 * deadline to acttime + T, recharge its execution budget to C, and insert
 * it into the master scheduling level as a deadline-based job. */
static void ELASTIC_activation(ELASTIC_level_des *lev,
PID p,
struct timespec *acttime)
{
JOB_TASK_MODEL job;
/* Job deadline = activation time + current period T (usec) */
TIMESPEC_ASSIGN(&(lev->elist[p].dline),acttime);
ADDUSEC2TIMESPEC(lev->elist[p].T,&(lev->elist[p].dline));
/* Recharge the capacity for this job */
proc_table[p].avail_time = lev->elist[p].C;
proc_table[p].wcet = lev->elist[p].C;
/* Job insertion into the master level (e.g. EDF) */
job_task_default_model(job, lev->elist[p].dline);
level_table[ lev->scheduling_level ]->
private_insert(lev->scheduling_level, p, (TASK_MODEL *)&job);
}
/* Periodic activation event handler: releases the next job of task p
 * (passed through the opaque arg pointer), requests a reschedule, and
 * re-arms itself at the new job's deadline (which, for an implicit-
 * deadline task, is also the next activation time). */
static void ELASTIC_timer_act(void *arg) {
PID p = (PID)(arg);
ELASTIC_level_des *lev;
struct timespec acttime;
#ifdef ELASTIC_DEBUG
printk("(ELASTIC:Timer:%d)",p);
#endif
kern_gettime(&acttime);
lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
/* Release the new job now */
ELASTIC_activation(lev, p, &acttime);
event_need_reschedule();
/* Next activation: re-post this handler at the job deadline */
kern_event_post(&(lev->elist[p].dline), ELASTIC_timer_act, (void *)(p));
}
/* The on-line guarantee is enabled only if the appropriate flag is set... */
static int ELASTIC_public_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
if (*freebandwidth >= lev->U) {
*freebandwidth -= lev->U;
return 1;
} else {
return 0;
}
}
/* Task creation hook: validate the ELASTIC task model, record the task's
 * parameters in the per-PID table, run the admission test, and initialise
 * the generic process-table fields.
 *
 * Admission test: the task set is schedulable if the sum of the minimum
 * bandwidths of all present tasks (C/T for rigid tasks with E == 0,
 * C/Tmax for elastic ones) does not exceed the level's reserved
 * bandwidth U. On rejection the slot is cleared and -1 is returned.
 *
 * Returns 0 on success, -1 on invalid model or failed admission. */
static int ELASTIC_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
  ELASTIC_TASK_MODEL *elastic = (ELASTIC_TASK_MODEL *)m;
  ext_bandwidth_t Umin = 0;
  PID i;  /* BUG FIX: loop index was used below without being declared */

  /* Model validation */
  if (m->pclass != ELASTIC_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;
  if (elastic->C == 0) return -1;
  if (elastic->Tmin > elastic->Tmax) return -1;
  if (elastic->Tmax == 0) return -1;

  /* Provisionally fill the slot (undone below if admission fails) */
  lev->elist[p].flags |= ELASTIC_PRESENT;
  NULL_TIMESPEC(&(lev->elist[p].dline));
  lev->elist[p].Tmin = elastic->Tmin;
  lev->elist[p].Tmax = elastic->Tmax;
  lev->elist[p].C = elastic->C;
  lev->elist[p].E = elastic->E;
  lev->elist[p].beta = elastic->beta;
  lev->elist[p].T = elastic->Tmin;

  /* check if new task can be admitted: */
  /* compute minimum consumed bandwidth of all tasks */
  Umin = 0;
  for (i=0; i<MAX_PROC; i++) {
    if (lev->elist[i].flags & ELASTIC_PRESENT) {
      if (lev->elist[i].E == 0) {
        /* The task is not elastic. Use current period T */
        Umin += (MAX_BANDWIDTH / lev->elist[i].T) * lev->elist[i].C;
      } else {
        /* The task is elastic. Use maximum period Tmax */
        Umin += (MAX_BANDWIDTH / lev->elist[i].Tmax) * lev->elist[i].C;
      }
    }
  }

  if (Umin > lev->U) {
    /* failed to add task: release the slot */
    lev->elist[p].flags = ELASTIC_EMPTY_SLOT;
    return -1;
  }

  /* Task admitted: recompute the period assignment of the whole set */
  ELASTIC_recompute(lev);

  proc_table[p].avail_time = elastic->C;
  proc_table[p].wcet = elastic->C;
  proc_table[p].control |= CONTROL_CAP;  /* enforce the execution budget */

  return 0; /* OK, also if the task cannot be guaranteed... */
}
/* Task detach hook: intentionally a no-op placeholder (the level
 * descriptor is fetched but unused; the per-PID slot is not cleared
 * here — NOTE(review): confirm whether detach should release the
 * elist slot). */
static void ELASTIC_public_detach(LEVEL l, PID p)
{
ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
}
/* Eligibility hook: every dispatched task is always eligible (0). */
static int ELASTIC_public_eligible(LEVEL l, PID p)
{
ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
return 0;
}
/* Dispatch hook: simply forward the dispatch to the master scheduling
 * level that holds the task's job. */
static void ELASTIC_public_dispatch(LEVEL l, PID p, int nostop)
{
ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
#ifdef ELASTIC_DEBUG
printk("(ELASTIC:Dsp:%d)",p);
#endif
level_table[ lev->scheduling_level ]->
private_dispatch(lev->scheduling_level,p,nostop);
}
/* Epilogue hook (task preempted or budget exhausted): raise a WCET
 * violation if the execution budget is spent, then forward the
 * epilogue to the master scheduling level. */
static void ELASTIC_public_epilogue(LEVEL l, PID p)
{
ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
#ifdef ELASTIC_DEBUG
printk("(ELASTIC:Epi:%d)",p);
#endif
/* check if the wcet is finished... */
if (proc_table[p].avail_time <= 0) {
TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,(unsigned short int)proc_table[p].context,0);
kern_raise(XWCET_VIOLATION,p);
}
level_table[ lev->scheduling_level ]->
private_epilogue(lev->scheduling_level,p);
}
/* Activation hook: release the first job of task p at time t and arm
 * the periodic activation timer. Activations of tasks that are not in
 * the SLEEP state are silently ignored. */
static void ELASTIC_public_activate(LEVEL l, PID p, struct timespec *t)
{
ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
#ifdef ELASTIC_DEBUG
printk("(ELASTIC:Act:%d)", p);
#endif
/* check if we are not in the SLEEP state */
if (proc_table[p].status != SLEEP) {
return;
}
/* First job release */
ELASTIC_activation(lev,p,t);
/* Next activation: timer posted at the job deadline */
kern_event_post(&(lev->elist[p].dline), ELASTIC_timer_act, (void *)(p));
}
/* Unblock hook: a task returning from a blocking primitive is treated
 * as a fresh activation starting at the current time. */
static void ELASTIC_public_unblock(LEVEL l, PID p)
{
  struct timespec now;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);

  kern_gettime(&now);
  ELASTIC_activation(lev, p, &now);
}
/* Block hook: remove the task's job from the master scheduling level
 * while the task waits on a blocking primitive. */
static void ELASTIC_public_block(LEVEL l, PID p)
{
ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
level_table[ lev->scheduling_level ]->
private_extract(lev->scheduling_level,p);
}
/* Message hook. Two messages are understood:
 *   NULL (0): end of the current job — extract the job from the master
 *             level and park the task in the ELASTIC_IDLE state until
 *             the next timer activation;
 *   1:        disable the task — extract the job and put the task to
 *             SLEEP (the next explicit activate restarts it).
 * Any other message is ignored. Always returns 0. */
static int ELASTIC_public_message(LEVEL l, PID p, void *m)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
  long msg = (long)(m);

  if (msg == (long)(NULL)) {
#ifdef ELASTIC_DEBUG
    printk("(ELASTIC:EndCyc:%d)",p);
#endif
    level_table[ lev->scheduling_level ]->
      private_extract(lev->scheduling_level,p);
    proc_table[p].status = ELASTIC_IDLE;
    jet_update_endcycle(); /* Update the Jet data... */
    TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);
  } else if (msg == 1) {
#ifdef ELASTIC_DEBUG
    printk("(ELASTIC:Disable:%d)",p);
#endif
    level_table[ lev->scheduling_level ]->
      private_extract(lev->scheduling_level,p);
    proc_table[p].status = SLEEP;
    TRACER_LOGEVENT(FTrace_EVT_task_disable,(unsigned short int)proc_table[p].context,(unsigned int)l);
  }

  return 0;
}
/* Task end hook: remove the terminating task's job from the master
 * scheduling level. */
static void ELASTIC_public_end(LEVEL l, PID p)
{
ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
level_table[ lev->scheduling_level ]->
private_extract(lev->scheduling_level,p);
}
/*+ Registration function +*/
/* Register the ELASTIC module as a new scheduling level.
 *   flags  - module options (e.g. ELASTIC_ENABLE_GUARANTEE installs the
 *            on-line guarantee hook);
 *   master - the level (e.g. EDF) that actually schedules the jobs;
 *   U      - total bandwidth reserved for elastic tasks.
 * Returns the index of the newly allocated level descriptor. */
LEVEL ELASTIC_register_level(int flags, LEVEL master, bandwidth_t U)
{
LEVEL l; /* the level that we register */
ELASTIC_level_des *lev; /* for readableness only */
PID i;
printk("ELASTIC_register_level\n");
/* request an entry in the level_table */
l = level_alloc_descriptor(sizeof(ELASTIC_level_des));
lev = (ELASTIC_level_des *)level_table[l];
/* fill the standard descriptor */
if (flags & ELASTIC_ENABLE_GUARANTEE)
lev->l.public_guarantee = ELASTIC_public_guarantee;
else
lev->l.public_guarantee = NULL;
lev->l.public_create = ELASTIC_public_create;
lev->l.public_detach = ELASTIC_public_detach;
lev->l.public_end = ELASTIC_public_end;
lev->l.public_eligible = ELASTIC_public_eligible;
lev->l.public_dispatch = ELASTIC_public_dispatch;
lev->l.public_epilogue = ELASTIC_public_epilogue;
lev->l.public_activate = ELASTIC_public_activate;
lev->l.public_unblock = ELASTIC_public_unblock;
lev->l.public_block = ELASTIC_public_block;
lev->l.public_message = ELASTIC_public_message;
/* NOTE(review): kern_alloc result is not checked — confirm it cannot
 * fail during registration or add an error path */
lev->elist = kern_alloc(MAX_PROC * sizeof(ELASTIC_task_descr));
/* fill the ELASTIC task descriptor part: all slots start empty */
for (i=0; i<MAX_PROC; i++) {
NULL_TIMESPEC(&(lev->elist[i].dline));
lev->elist[i].Tmin = 0;
lev->elist[i].Tmax = 0;
lev->elist[i].T = 0;
lev->elist[i].C = 0;
lev->elist[i].E = 0;
lev->elist[i].beta = 0;
lev->elist[i].flags = ELASTIC_EMPTY_SLOT;
}
lev->U = U;
lev->scheduling_level = master;
lev->current_level = l;
lev->flags = flags;
return l;
}