Subversion Repositories shark

Compare Revisions

Ignore whitespace Rev 960 → Rev 961

/shark/trunk/modules/rrsoft/rrsoft/rrsoft.h
0,0 → 1,140
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: rrsoft.h,v 1.1 2005-02-25 10:55:09 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:55:09 $
------------
 
This file contains the scheduling module RRSOFT (Round Robin for
Hard & Soft Model)
 
Title:
RRSOFT (Round Robin) version 1
 
Task Models Accepted:
NRT_TASK_MODEL - Non-Realtime Tasks
weight field is ignored
slice field is used to set the slice of a task, if it is !=0
policy field is ignored
inherit field is ignored
SOFT_TASK_MODEL - Soft tasks
only the periodicity and the met are used.
HARD_TASK_MODEL - Hard tasks
only the periodicity and the period are used.
 
 
Description:
This module schedule his tasks following the classic round-robin
scheme. The default timeslice is given at registration time and is a
a per-task specification. The default timeslice is used if the slice
field in the NRT_TASK_MODEL is 0.
 
The module can SAVE or SKIP activations
There is another module, RR, that always SKIPs activations...
 
The Module is derived from the RR module, and it accepts only one
kind of task Model (hard, soft or nrt).
 
Exceptions raised:
XUNVALID_GUEST
This level doesn't support guests. When a guest operation
is called, the exception is raised.
 
Restrictions & special features:
- if specified, it creates at init time a task,
called "Main", attached to the function __init__().
- There must be only one module in the system that creates a task
attached to the function __init__().
- The level tries to guarantee that a task uses a "full" timeslice
before going to the queue tail. "full" means that a task can execute
a maximum time of slice+sys_tick due to the approx. done by
the Virtual Machine. If a task execute more time than the slice,
the next time it execute less...
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#ifndef __RRSOFT_H__
#define __RRSOFT_H__
 
#include <ll/ll.h>
#include <kernel/config.h>
#include <sys/types.h>
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
extern TASK __init__(void *arg);
 
 
 
/*+ Const: +*/
#define RRSOFT_MINIMUM_SLICE 1000 /*+ Minimum Timeslice +*/
#define RRSOFT_MAXIMUM_SLICE 500000 /*+ Maximum Timeslice +*/
 
#define RRSOFT_MAIN_YES 1 /*+ The level creates the main +*/
#define RRSOFT_MAIN_NO 0 /*+ The level doesn't create the main +*/
 
#define RRSOFT_ONLY_HARD 1 /*+ The level accepts only Hard Tasks +*/
#define RRSOFT_ONLY_SOFT 2 /*+ The level accepts only Soft Tasks +*/
#define RRSOFT_ONLY_NRT 4 /*+ The level accepts only NRT Tasks +*/
 
 
/*+ Registration function:
TIME slice the slice for the Round Robin queue
int createmain 1 if the level creates the main task 0 otherwise
struct multiboot_info *mb used if createmain specified
 
returns the level number at which the module has been registered.
+*/
 
LEVEL RRSOFT_register_level(TIME slice,
int createmain,
struct multiboot_info *mb,
BYTE models);
 
__END_DECLS
#endif
/shark/trunk/modules/rrsoft/rrsoft.c
0,0 → 1,437
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: rrsoft.c,v 1.1 2005-02-25 10:55:09 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:55:09 $
------------
 
This file contains the scheduling module RRSOFT (Round Robin)
 
Read rrsoft.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <rrsoft/rrsoft/rrsoft.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
/*+ Status used in the level +*/
#define RRSOFT_READY MODULE_STATUS_BASE
#define RRSOFT_IDLE MODULE_STATUS_BASE+2
 
/*+ the level redefinition for the Round Robin level +*/
typedef struct {
level_des l; /*+ the standard level descriptor +*/

int nact[MAX_PROC]; /*+ number of pending (saved) activations;
-1 => activations are skipped, not saved +*/

IQUEUE ready; /*+ the ready queue +*/

int slice; /*+ the level's default time slice +*/

TIME period[MAX_PROC]; /*+ activation period; 0 for aperiodic tasks +*/

struct timespec reactivation_time[MAX_PROC];
/*+ the absolute time at which the reactivation timer is posted +*/
int reactivation_timer[MAX_PROC];
/*+ handle of the pending reactivation event; -1 if none +*/

BYTE periodic[MAX_PROC]; /*+ 1 if the task is periodic, 0 otherwise +*/


struct multiboot_info *multiboot; /*+ used if the level has to insert
the main task +*/

BYTE models; /*+ Task Models this instance accepts
(RRSOFT_ONLY_HARD/SOFT/NRT mask) +*/
} RRSOFT_level_des;
 
 
/* Periodic reactivation event handler.  It is posted (initially by
   RRSOFT_public_activate, then by itself) only for periodic tasks;
   'par' carries the PID of the task to reactivate. */
static void RRSOFT_timer_reactivate(void *par)
{
PID p = (PID) par;
RRSOFT_level_des *lev;

/* recover the level descriptor through the task's own level index */
lev = (RRSOFT_level_des *)level_table[proc_table[p].task_level];

if (proc_table[p].status == RRSOFT_IDLE) {
/* the task has finished the current activation: put it back in
the ready queue and request a reschedule */
proc_table[p].status = RRSOFT_READY;
iq_insertlast(p,&lev->ready);

event_need_reschedule();
}
else if (lev->nact[p] >= 0)
/* the task has not completed the current activation; if the task
saves its arrivals (nact >= 0), record the activation in nact */
lev->nact[p]++;

/* repost the event one period ahead */
ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
RRSOFT_timer_reactivate,
(void *)p);
}
 
/* Select the task to run: the head of the ready queue.
   A head task whose budget is exhausted is first recharged and moved
   to the tail, then the new head is examined again.  This is needed
   because a task may run a long time via (shadow!) priority
   inheritance and must then go to the tail several times. */
static PID RRSOFT_public_scheduler(LEVEL l)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);
  PID first;

  while ((first = iq_query_first(&lev->ready)) != -1) {
    if (proc_table[first].avail_time > 0)
      return first;

    /* budget exhausted: recharge and requeue at the tail */
    proc_table[first].avail_time += proc_table[first].wcet;
    iq_extract(first, &lev->ready);
    iq_insertlast(first, &lev->ready);
  }

  return -1; /* empty queue */
}
 
/* Task admission and per-task setup.
 * Accepts only the model class(es) selected at registration time
 * (lev->models) and only tasks targeted at level 0 or this level.
 * The wcet field of the process descriptor is used to hold the
 * timeslice, so the capacity mechanism can account a task that
 * consumes more than one slice.
 * Returns 0 on acceptance, -1 on rejection. */
static int RRSOFT_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);

  if (m->pclass==NRT_PCLASS && !(lev->models & RRSOFT_ONLY_NRT) ) return -1;
  if (m->pclass==SOFT_PCLASS && !(lev->models & RRSOFT_ONLY_SOFT) ) return -1;
  if (m->pclass==HARD_PCLASS && !(lev->models & RRSOFT_ONLY_HARD) ) return -1;
  if (m->level != 0 && m->level != l) return -1;

  if (lev->models & RRSOFT_ONLY_NRT &&
      (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l))) {
    NRT_TASK_MODEL *nrt = (NRT_TASK_MODEL *)m;

    /* a slice of 0 in the model selects the level's default slice */
    if (nrt->slice) {
      proc_table[p].avail_time = nrt->slice;
      proc_table[p].wcet = nrt->slice;
    }
    else {
      proc_table[p].avail_time = lev->slice;
      proc_table[p].wcet = lev->slice;
    }
    proc_table[p].control |= CONTROL_CAP;

    if (nrt->arrivals == SAVE_ARRIVALS)
      lev->nact[p] = 0;
    else
      lev->nact[p] = -1;

    lev->periodic[p] = 0;
    lev->period[p] = 0;
  }
  else if (lev->models & RRSOFT_ONLY_SOFT &&
           (m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l))) {
    SOFT_TASK_MODEL *soft = (SOFT_TASK_MODEL *)m;

    proc_table[p].avail_time = lev->slice;
    proc_table[p].wcet = lev->slice;
    proc_table[p].control |= CONTROL_CAP;

    if (soft->arrivals == SAVE_ARRIVALS)
      lev->nact[p] = 0;
    else
      lev->nact[p] = -1;

    /* Bug fix: reset first.  PIDs are recycled and the original code
       left periodic[p]/period[p] untouched for aperiodic soft tasks,
       so a stale periodic[p]==1 from a previous task with the same
       PID would wrongly arm the reactivation timer. */
    lev->periodic[p] = 0;
    lev->period[p] = 0;

    if (soft->periodicity == PERIODIC) {
      lev->periodic[p] = 1;
      lev->period[p] = soft->period;
    }
  }
  else if (lev->models & RRSOFT_ONLY_HARD &&
           (m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l))) {
    HARD_TASK_MODEL *hard = (HARD_TASK_MODEL *)m;

    proc_table[p].avail_time = lev->slice;
    proc_table[p].wcet = lev->slice;
    proc_table[p].control |= CONTROL_CAP;
    lev->nact[p] = 0;

    /* same stale-state reset as in the soft branch (see above) */
    lev->periodic[p] = 0;
    lev->period[p] = 0;

    if (hard->periodicity == PERIODIC) {
      lev->periodic[p] = 1;
      lev->period[p] = hard->mit;
    }
  }

  return 0; /* OK */
}
 
/* Dispatch hook: the scheduler has already set the task state to EXE;
   here we only remove the task from the ready queue.  NB: p is not
   necessarily the queue head (e.g. shadow dispatches)! */
static void RRSOFT_public_dispatch(LEVEL l, PID p, int nostop)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);

  iq_extract(p, &lev->ready);
}
 
/* Epilogue: decide the correct queue position for the preempted task,
   depending on whether its timeslice is exhausted. */
static void RRSOFT_public_epilogue(LEVEL l, PID p)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);

  if (proc_table[p].avail_time > 0) {
    /* residual slice left: the task keeps the head of the queue and
       will run for the remaining time */
    iq_insertfirst(p, &lev->ready);
  }
  else {
    /* slice exhausted: recharge the budget and go to the tail */
    proc_table[p].avail_time += proc_table[p].wcet;
    iq_insertlast(p, &lev->ready);
  }

  proc_table[p].status = RRSOFT_READY;
}
 
/* Activation hook: insert the task in the ready queue; for periodic
   tasks also arm the reactivation event.  Activating a task that is
   neither SLEEPing nor IDLE only records the activation (SAVE mode). */
static void RRSOFT_public_activate(LEVEL l, PID p, struct timespec *t)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);
  int dormant = (proc_table[p].status == SLEEP ||
                 proc_table[p].status == RRSOFT_IDLE);

  if (!dormant) {
    /* task already active: save the activation only if nact is used */
    if (lev->nact[p] != -1)
      lev->nact[p]++;
    return;
  }

  proc_table[p].status = RRSOFT_READY;
  iq_insertlast(p, &lev->ready);

  /* periodic task: post the reactivation event one period from now */
  if (lev->periodic[p]) {
    kern_gettime(&lev->reactivation_time[p]);
    ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
    lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
                                                 RRSOFT_timer_reactivate,
                                                 (void *)p);
  }
}
 
/* Unblock hook: like RRSOFT_public_activate, but unconditional — the
   task state is not checked and no reactivation timer is touched. */
static void RRSOFT_public_unblock(LEVEL l, PID p)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);

  iq_insertlast(p, &lev->ready);
  proc_table[p].status = RRSOFT_READY;
}
 
/* Block hook: intentionally empty. */
static void RRSOFT_public_block(LEVEL l, PID p)
{
/* Nothing to do here:
. the task was already extracted from the ready queue at dispatch time
. the capacity event is removed by the generic kernel
. the wcet needs no modification
. the task state is set by the calling function */
}
 
/* End-of-cycle message: either consume a saved activation and restart
   the task immediately, or park it in the IDLE state waiting for the
   next (re)activation. */
static int RRSOFT_public_message(LEVEL l, PID p, void *m)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);

  if (lev->nact[p] <= 0) {
    /* no pending activation: the task idles until reactivated */
    proc_table[p].status = RRSOFT_IDLE;
  }
  else {
    /* consume one saved activation and continue at the queue head */
    lev->nact[p]--;
    iq_insertfirst(p,&lev->ready);
    proc_table[p].status = RRSOFT_READY;
  }

  jet_update_endcycle(); /* Update the Jet data... */
  TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);
  return 0;
}
 
/* Task termination: cancel any pending reactivation event and return
 * the descriptor to the free queue. */
static void RRSOFT_public_end(LEVEL l, PID p)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);

  lev->nact[p] = -1;

  /* Delete the reactivation timer.  Bug fix: also check the handle;
     a periodic task that dies before ever being activated still has
     reactivation_timer[p] == -1 (set at registration time), and the
     original code passed that invalid handle to kern_event_delete. */
  if (lev->periodic[p] && lev->reactivation_timer[p] != -1) {
    kern_event_delete(lev->reactivation_timer[p]);
    lev->reactivation_timer[p] = -1;
  }

  /* then, we insert the task in the free queue */
  proc_table[p].status = FREE;
  iq_insertlast(p,&freedesc);
}
 
/* Registration functions */
 
/*+ Run-level init hook: creates and activates the "Main" task, bound
    to __init__(), as a NRT task of this level.  'l' is the level
    number, cast to a pointer by the registration function. +*/
static void RRSOFT_call_main(void *l)
{
  LEVEL lev;
  PID p;
  NRT_TASK_MODEL m;
  void *mb;

  lev = (LEVEL)l;

  nrt_task_default_model(m);
  nrt_task_def_level(m,lev); /* make sure the task is created at this
                                level and not at another one */

  mb = ((RRSOFT_level_des *)level_table[lev])->multiboot;
  nrt_task_def_arg(m,mb);
  nrt_task_def_usemath(m);
  nrt_task_def_nokill(m);
  nrt_task_def_ctrl_jet(m);

  p = task_create("Main", __init__, (TASK_MODEL *)&m, NULL);

  if (p == NIL) {
    /* Bug fix: the original fell through and still called
       RRSOFT_public_activate(lev, NIL, NULL), indexing proc_table
       with an invalid PID.  Bail out instead. */
    printk("\nPanic!!! can't create main task...\n");
    return;
  }

  RRSOFT_public_activate(lev,p,NULL);
}
 
 
/*+ Registration function:
    TIME slice                 default timeslice for the Round Robin queue
                               (clamped to [RRSOFT_MINIMUM_SLICE,
                                RRSOFT_MAXIMUM_SLICE])
    int createmain             1 => the level creates the "Main" task
    struct multiboot_info *mb  passed to Main when createmain is set
    BYTE models                mask of accepted task models

    returns the level number at which the module has been registered. +*/
LEVEL RRSOFT_register_level(TIME slice,
                            int createmain,
                            struct multiboot_info *mb,
                            BYTE models)
{
  RRSOFT_level_des *lev;  /* shortcut to the new descriptor */
  LEVEL l;                /* the level number we obtain */
  PID i;

  printk("RRSOFT_register_level\n");

  /* allocate an entry in the level table */
  l = level_alloc_descriptor(sizeof(RRSOFT_level_des));
  lev = (RRSOFT_level_des *)level_table[l];

  /* standard level interface */
  lev->l.public_scheduler = RRSOFT_public_scheduler;
  lev->l.public_create    = RRSOFT_public_create;
  lev->l.public_end       = RRSOFT_public_end;
  lev->l.public_dispatch  = RRSOFT_public_dispatch;
  lev->l.public_epilogue  = RRSOFT_public_epilogue;
  lev->l.public_activate  = RRSOFT_public_activate;
  lev->l.public_unblock   = RRSOFT_public_unblock;
  lev->l.public_block     = RRSOFT_public_block;
  lev->l.public_message   = RRSOFT_public_message;

  /* per-task state of the RRSOFT part */
  for (i = 0; i < MAX_PROC; i++) {
    lev->nact[i] = -1;
    NULL_TIMESPEC(&lev->reactivation_time[i]);
    lev->reactivation_timer[i] = -1;
    lev->periodic[i] = 0;
    lev->period[i] = 0;
  }

  iq_init(&lev->ready, &freedesc, 0);

  /* clamp the timeslice into the allowed range */
  if (slice < RRSOFT_MINIMUM_SLICE) slice = RRSOFT_MINIMUM_SLICE;
  if (slice > RRSOFT_MAXIMUM_SLICE) slice = RRSOFT_MAXIMUM_SLICE;
  lev->slice = slice;

  lev->multiboot = mb;
  lev->models = models;

  if (createmain)
    sys_atrunlevel(RRSOFT_call_main,(void *) l, RUNLEVEL_INIT);

  return l;
}
 
 
 
 
/shark/trunk/modules/rrsoft/subdir.mk
0,0 → 1,0
OBJS += rrsoft/rrsoft.o
/shark/trunk/modules/ps/ps/ps.h
0,0 → 1,135
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: ps.h,v 1.1 2005-02-25 10:40:58 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:40:58 $
------------
 
This file contains the aperiodic server PS (Polling Server)
 
Title:
PS (Polling Server)
 
Task Models Accepted:
SOFT_TASK_MODEL - Soft Tasks
wcet field is ignored
met field is ignored
period field is ignored
periodicity field can be only APERIODIC
arrivals field can be either SAVE or SKIP
 
Description:
This module schedule his tasks following the Polling Server scheme.
 
All the tasks are put in a FIFO (FCFS) queue and at a time only the first
task in the queue is put in the upper level.
 
The module remembers pending activations when calling task_sleep...
 
Exceptions raised:
XUNVALID_GUEST
This level doesn't support guests. When a guest operation
is called, the exception is raised.
 
Restrictions & special features:
- This level doesn't manage the main task.
- At init time we have to specify:
. The Capacity and the period used by the server
- The level don't use the priority field.
- A function to return the used bandwidth of the level is provided.
- if an aperiodic task calls a task_delay when owning a mutex implemented
with shadows, the delay may have no effect, so don't use delay when
using a mutex!!!
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#ifndef __PS_H__
#define __PS_H__
 
#include <ll/ll.h>
#include <kernel/config.h>
#include <sys/types.h>
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/*+ 1 - ln(2) +*/
#ifndef RM_MINFREEBANDWIDTH
#define RM_MINFREEBANDWIDTH 1317922825
#endif
 
/*+ flags... +*/
#define PS_DISABLE_ALL 0
#define PS_ENABLE_BACKGROUND 1 /*+ Background scheduling enabled +*/
#define PS_ENABLE_GUARANTEE_EDF 2 /*+ Task Guarantee enabled +*/
#define PS_ENABLE_ALL_EDF 3 /*+ All flags enabled +*/
 
#define PS_ENABLE_GUARANTEE_RM 4 /*+ Task Guarantee enabled +*/
#define PS_ENABLE_ALL_RM 5 /*+ All flags enabled +*/
 
/*+ internal flags +*/
#define PS_BACKGROUND 8 /*+ this flag is set when scheduling
in background +*/
#define PS_BACKGROUND_BLOCK 16 /*+ this flag is set when we want to
blocks the background scheduling +*/
 
/*+ Registration function:
bandwidth_t b Max bandwidth used by the TBS
int flags Options to be used in this level instance...
LEVEL master the level that must be used as master level for the
TBS tasks
int num,den used to compute the TBS bandwidth
 
returns the level number at which the module has been registered.
+*/
LEVEL PS_register_level(int flags, LEVEL master, int Cs, int per);
 
/*+ Returns the used bandwidth of a level +*/
bandwidth_t PS_usedbandwidth(LEVEL l);
 
__END_DECLS
#endif
/shark/trunk/modules/ps/ps.c
0,0 → 1,530
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: ps.c,v 1.1 2005-02-25 10:40:58 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:40:58 $
------------
 
This file contains the aperiodic server PS (Polling Server)
 
when scheduling in background the flags field has the PS_BACKGROUND bit set
 
when scheduling a task because it is pointed by another task via shadows,
the task have to be extracted from the wait queue or the master level. To
check this we have to look at the activated field; it is != NIL if a task
is inserted into the master level. Only a task at a time can be inserted
into the master level.
 
The capacity of the server must be updated
- when scheduling a task normally
- when scheduling a task because it is pointed by a shadow
but not when scheduling in background.
 
When a task is extracted from the system no scheduling has to be done
until the task reenter into the system. to implement this, when a task
is extracted we block the background scheduling (the scheduling with the
master level is already blocked because the activated field is not
reset to NIL) using the PS_BACKGROUNDBLOCK bit.
 
nact[p] is -1 if the task set the activations to SKIP, >= 0 otherwise
 
Note that if the period event fires and there aren't any task to schedule,
the server capacity is set to 0. This is correct, but there is a subtle
variant: the server capacity may be set to 0 later because if at the
period end the running task have priority > than the server, the capacity
may be set to zero the first time the server become the highest priority
running task and there aren't task to serve. The second implementation
is more efficient but more complicated, because normally we don't know the
priority of the running task.
 
An implementation can be done in this way: when there are not task to
schedule, we does not set the lev->activated field to nil, but to a "dummy"
task that is inserted into the master level queue.
When the master level scheduler try to schedule the "dummy" task (this is
the situation in which there are no tasks to schedule and the PS is the
task with greater priority), it calls the PS_task_eligible, that set the
server capacity to 0, remove the dummy task from the queue with a guest_end
and ask to reschedule.
 
Because this implementation is more complex than the first, I don't
implement it... see (*), near line 169, 497 and 524
 
 
Read PS.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <ps/ps/ps.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
/*+ Status used in the level +*/
#define PS_WAIT APER_STATUS_BASE /*+ waiting the service +*/
 
/*+ the level redefinition for the Polling Server (PS) level +*/
typedef struct {
level_des l; /*+ the standard level descriptor +*/

/* The wcet are stored in the task descriptor's priority
field, so no other fields are needed */

int nact[MAX_PROC]; /*+ number of pending activations;
-1 => activations are skipped, not saved +*/

struct timespec lastdline; /*+ the last deadline assigned to
a PS task +*/

int Cs; /*+ server capacity +*/
int availCs; /*+ residual capacity in the current period +*/

IQUEUE wait; /*+ the FIFO wait queue of the PS +*/
PID activated; /*+ the task currently inserted in the master
level; NIL if none +*/

int flags; /*+ the init flags plus the PS_BACKGROUND*
state bits +*/

bandwidth_t U; /*+ the bandwidth used by the server +*/
int period; /*+ the server period +*/

LEVEL scheduling_level; /*+ the master level PS tasks execute in +*/

} PS_level_des;
 
/* Insert the task pointed by lev->activated into the master level as
   a guest, using the server's current deadline and period. */
static __inline__ void PS_activation(PS_level_des *lev)
{
  LEVEL master = lev->scheduling_level;
  JOB_TASK_MODEL job;

  job_task_default_model(job, lev->lastdline);
  job_task_def_period(job, lev->period);
  level_table[master]->private_insert(master, lev->activated,
                                      (TASK_MODEL *)&job);
}
 
/* Server period-end event: advance the deadline, replenish the
   capacity and, if the server is idle but has budget, promote the
   first waiting task to the master level.  'a' is the level number
   of this PS instance, cast to a pointer. */
static void PS_deadline_timer(void *a)
{
PS_level_des *lev = (PS_level_des *)(level_table[(LEVEL)a]);

/* move the server deadline one period forward */
ADDUSEC2TIMESPEC(lev->period, &lev->lastdline);

/* replenish: a negative availCs (overrun via a shadow) is charged
against the fresh budget instead of being forgiven */
if (lev->availCs >= 0)
lev->availCs = lev->Cs;
else
lev->availCs += lev->Cs;

/* lev->activated == NIL only if the previous task finished and no
other task was available to be inserted in the master level
... so we activate the next waiting task now, if any */
if (lev->availCs > 0 && lev->activated == NIL) {
if (iq_query_first(&lev->wait) != NIL) {
lev->activated = iq_getfirst(&lev->wait);
PS_activation(lev);
event_need_reschedule();
}
else
lev->availCs = 0; /* see note (*) at the begin of the file */
}

/* repost the event at the next period end */
kern_event_post(&lev->lastdline, PS_deadline_timer, a);
}
 
/* Background scheduler: the PS catches the background time to execute
   aperiodic activities.  Marks the flags so that the capacity
   accounting knows this schedule happened in background. */
static PID PS_public_schedulerbackground(LEVEL l)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  lev->flags |= PS_BACKGROUND;

  return (lev->flags & PS_BACKGROUND_BLOCK) ? NIL
                                            : iq_query_first(&lev->wait);
}
 
/* On-line guarantee under EDF (enabled only with the appropriate
   registration flag): accept if the server bandwidth fits in the
   remaining free bandwidth, and subtract it. */
static int PS_public_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  if (*freebandwidth < lev->U)
    return 0;

  *freebandwidth -= lev->U;
  return 1;
}
 
/* On-line guarantee under RM: same as the EDF variant, but keeps the
   RM utilization bound margin (RM_MINFREEBANDWIDTH) free. */
static int PS_public_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  if (*freebandwidth <= lev->U + RM_MINFREEBANDWIDTH)
    return 0;

  *freebandwidth -= lev->U;
  return 1;
}
 
/* Task admission: accept only aperiodic SOFT tasks targeted at level
 * 0 or this level.  Returns 0 on acceptance (which does NOT imply a
 * bandwidth guarantee), -1 on rejection.
 * Fix: the original assigned s = (SOFT_TASK_MODEL *)m twice. */
static int PS_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);
  SOFT_TASK_MODEL *s;

  if (m->pclass != SOFT_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;

  s = (SOFT_TASK_MODEL *)m;
  if (s->periodicity != APERIODIC) return -1;

  /* nact == -1 means the task skips extra activations instead of
     saving them */
  if (s->arrivals == SAVE_ARRIVALS)
    lev->nact[p] = 0;
  else
    lev->nact[p] = -1;

  return 0; /* OK, also if the task cannot be guaranteed... */
}
 
/* Dispatch hook: remove the chosen task from the PS bookkeeping
   (wait queue or master level) and arm the capacity timer. */
static void PS_public_dispatch(LEVEL l, PID p, int nostop)
{
PS_level_des *lev = (PS_level_des *)(level_table[l]);
struct timespec ty;

/* If p is not the activated task, it is being scheduled in
background: extract it from the wait queue.  We can't check the
status because the scheduler sets it to exe before calling
task_dispatch; hence the lev->activated != p test. */
if (lev->activated != p) {
iq_extract(p, &lev->wait);
}
else {
/* the task lives in the master level: forward the dispatch */
level_table[ lev->scheduling_level ]->
private_dispatch(lev->scheduling_level,p,nostop);
}

/* set the capacity timer to fire when availCs would be exhausted */
if (!nostop) {
TIMESPEC_ASSIGN(&ty, &schedule_time);
ADDUSEC2TIMESPEC(lev->availCs,&ty);
cap_timer = kern_event_post(&ty, capacity_timer, NULL);
}

}
 
/* Epilogue: charge the elapsed execution time to the server capacity
   (unless it ran in background), then decide where the preempted task
   goes: back to the master level, back to the wait queue, or forcibly
   suspended when the budget is exhausted. */
static void PS_public_epilogue(LEVEL l, PID p)
{
PS_level_des *lev = (PS_level_des *)(level_table[l]);
struct timespec ty;
TIME tx;

/* update the server capacity; background executions are free */
if (lev->flags & PS_BACKGROUND)
lev->flags &= ~PS_BACKGROUND;
else {
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
tx = TIMESPEC2USEC(&ty);
lev->availCs -= tx;
}

/* check if the server capacity is finished... */
if (lev->availCs < 0) {
/* the server slice has finished... do the task_end!!!
a first version of the module used the task_endcycle, but it was
not conceptually correct because the task didn't stop because it
finished all the work but because the server didn't have budget!
So, if the task_endcycle is called, the task remains in the
master level, and we can't wake it up if, for example, another
task points its shadow to it!!! */
if (lev->activated == p)
level_table[ lev->scheduling_level ]->
private_extract(lev->scheduling_level,p);
iq_insertfirst(p, &lev->wait);
proc_table[p].status = PS_WAIT;
lev->activated = NIL;
}
else
/* the task has been preempted. it returns into the ready queue or to the
wait queue by calling the guest_epilogue... */
if (lev->activated == p) {
level_table[ lev->scheduling_level ]->
private_epilogue(lev->scheduling_level,p);
} else {
iq_insertfirst(p, &lev->wait);
proc_table[p].status = PS_WAIT;
}
}
 
/* Activation hook: serve the task at once if the server is idle and
   has budget, queue it otherwise; an activation of an already-served
   task is saved in nact (when arrivals are saved). */
static void PS_public_activate(LEVEL l, PID p, struct timespec *t)
{
PS_level_des *lev = (PS_level_des *)(level_table[l]);

/* task already being served or queued: save the activation only */
if (lev->activated == p || proc_table[p].status == PS_WAIT) {
if (lev->nact[p] != -1)
lev->nact[p]++;
}
else if (proc_table[p].status == SLEEP) {

/* serve immediately if the server is idle and has capacity,
otherwise append to the FIFO wait queue */
if (lev->activated == NIL && lev->availCs > 0) {
lev->activated = p;
PS_activation(lev);
}
else {
iq_insertlast(p, &lev->wait);
proc_table[p].status = PS_WAIT;
}
}
else
/* unexpected task state: reject the activation (debug trace) */
{ kern_printf("PS_REJ%d %d %d %d ",p, proc_table[p].status, lev->activated, lev->wait.first);
return; }

}
 
/* Unblock hook: the task re-enters the system through the wait queue.
   The server capacity is 0 at this point, since nobody executed
   through the PS in the meantime. */
static void PS_public_unblock(LEVEL l, PID p)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  /* re-enable background scheduling */
  lev->flags &= ~PS_BACKGROUND_BLOCK;

  lev->activated = -1;

  proc_table[p].status = PS_WAIT;
  iq_insertfirst(p, &lev->wait);
}
 
/* Block hook: drop the residual server capacity, freeze background
   service, and take the task back from the master level if it was
   lent there. */
static void PS_public_block(LEVEL l, PID p)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  lev->availCs = 0;
  lev->flags |= PS_BACKGROUND_BLOCK;

  if (lev->activated == p)
    level_table[ lev->scheduling_level ]->
      private_extract(lev->scheduling_level,p);
}
 
/* End-of-cycle message: charge the consumed capacity, remove the task
   from wherever it is (master level or wait queue), requeue it if it
   has saved activations, and hand the server to the next waiter. */
static int PS_public_message(LEVEL l, PID p, void *m)
{
PS_level_des *lev = (PS_level_des *)(level_table[l]);
struct timespec ty;
TIME tx;

/* update the server capacity; background executions are free */
if (lev->flags & PS_BACKGROUND)
lev->flags &= ~PS_BACKGROUND;
else {
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
tx = TIMESPEC2USEC(&ty);
lev->availCs -= tx;
}

/* detach the task from the master level or the wait queue */
if (lev->activated == p)
level_table[ lev->scheduling_level ]->
private_extract(lev->scheduling_level,p);
else
iq_extract(p, &lev->wait);

/* consume a saved activation, or put the task to sleep */
if (lev->nact[p] > 0)
{
lev->nact[p]--;
iq_insertlast(p, &lev->wait);
proc_table[p].status = PS_WAIT;
}
else
proc_table[p].status = SLEEP;

/* hand the server to the next waiting task, if any */
lev->activated = iq_getfirst(&lev->wait);
if (lev->activated == NIL)
lev->availCs = 0; /* see note (*) at the begin of the file */
else
PS_activation(lev);

jet_update_endcycle(); /* Update the Jet data... */
TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);

return 0;
}
 
/* Task termination: charge the consumed capacity, detach the task
   from the master level if needed, free its descriptor, and hand the
   server to the next waiting task. */
static void PS_public_end(LEVEL l, PID p)
{
PS_level_des *lev = (PS_level_des *)(level_table[l]);
struct timespec ty;
TIME tx;

/* update the server capacity; background executions are free */
if (lev->flags & PS_BACKGROUND)
lev->flags &= ~PS_BACKGROUND;
else {
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
tx = TIMESPEC2USEC(&ty);
lev->availCs -= tx;
}

if (lev->activated == p)
level_table[ lev->scheduling_level ]->
private_extract(lev->scheduling_level,p);

/* return the descriptor to the free queue */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);

/* hand the server to the next waiting task, if any */
lev->activated = iq_getfirst(&lev->wait);
if (lev->activated == NIL)
lev->availCs = 0; /* see note (*) at the begin of the file */
else
PS_activation(lev);
}
 
/* Registration functions */
 
 
/*+ This init function installs the PS deadline timer: the first
    deadline is set one server period from now and the periodic
    PS_deadline_timer event is armed on it. +*/
static void PS_dline_install(void *l)
{
  PS_level_des *lev = (PS_level_des *)(level_table[(LEVEL)l]);

  /* lastdline = now + period */
  kern_gettime(&lev->lastdline);
  ADDUSEC2TIMESPEC(lev->period, &lev->lastdline);

  kern_event_post(&lev->lastdline, PS_deadline_timer, l);
}
 
 
 
/*+ Registration function:
    int flags the init flags ... see PS.h
    Registers a Polling Server level scheduled through the given master
    level, with capacity Cs (usec) every per (usec). +*/
LEVEL PS_register_level(int flags, LEVEL master, int Cs, int per)
{
  LEVEL l;           /* the newly allocated level number */
  PS_level_des *lev; /* shortcut to the level descriptor */
  PID i;

  printk("PS_register_level\n");

  /* allocate and fetch the level descriptor */
  l = level_alloc_descriptor(sizeof(PS_level_des));
  lev = (PS_level_des *)level_table[l];

  printk(" lev=%d\n",(int)lev);

  /* standard descriptor part: optional background scheduling and
     optional on-line acceptance test (EDF or RM based) */
  if (flags & PS_ENABLE_BACKGROUND)
    lev->l.public_scheduler = PS_public_schedulerbackground;

  if (flags & PS_ENABLE_GUARANTEE_EDF)
    lev->l.public_guarantee = PS_public_guaranteeEDF;
  else if (flags & PS_ENABLE_GUARANTEE_RM)
    lev->l.public_guarantee = PS_public_guaranteeRM;
  else
    lev->l.public_guarantee = NULL;

  lev->l.public_create   = PS_public_create;
  lev->l.public_end      = PS_public_end;
  lev->l.public_dispatch = PS_public_dispatch;
  lev->l.public_epilogue = PS_public_epilogue;
  lev->l.public_activate = PS_public_activate;
  lev->l.public_unblock  = PS_public_unblock;
  lev->l.public_block    = PS_public_block;
  lev->l.public_message  = PS_public_message;

  /* PS-specific part: no pending activations, empty wait queue,
     capacity exhausted until the first replenishment */
  for (i = 0; i < MAX_PROC; i++)
    lev->nact[i] = -1;

  iq_init(&lev->wait, &freedesc, 0);
  lev->activated = NIL;

  lev->Cs = Cs;
  lev->availCs = 0;
  lev->period = per;

  /* reserved utilization: Cs/per scaled on MAX_BANDWIDTH */
  lev->U = (MAX_BANDWIDTH / per) * Cs;

  lev->scheduling_level = master;

  /* keep only the three option bits */
  lev->flags = flags & 0x07;

  /* arm the first server deadline at system startup */
  sys_atrunlevel(PS_dline_install,(void *) l, RUNLEVEL_INIT);

  return l;
}
 
/* Return the bandwidth (utilization) reserved by the server at level l. */
bandwidth_t PS_usedbandwidth(LEVEL l)
{
  return ((PS_level_des *)(level_table[l]))->U;
}
 
/shark/trunk/modules/ps/subdir.mk
0,0 → 1,0
OBJS += ps/ps.o
/shark/trunk/modules/rr/rr/rr.h
0,0 → 1,124
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: rr.h,v 1.1 2005-02-25 10:45:58 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:45:58 $
------------
 
This file contains the scheduling module RR (Round Robin)
 
Title:
RR (Round Robin)
 
Task Models Accepted:
NRT_TASK_MODEL - Non-Realtime Tasks
weight field is ignored
slice field is used to set the slice of a task, if it is !=0
activation field is ignored
policy field is ignored
inherit field is ignored
 
Description:
This module schedules its tasks following the classic round-robin
scheme. The default timeslice is given at registration time and is
a per-task specification. The default timeslice is used if the slice
field in the NRT_TASK_MODEL is 0.
 
The module always SKIPs activations, even if SAVE_ARRIVALS is set!!!
There is another module, RR2, that remembers activations...
 
Exceptions raised:
XUNVALID_GUEST
This level doesn't support guests. When a guest operation
is called, the exception is raised.
 
Restrictions & special features:
- if specified, it creates at init time a task,
called "Main", attached to the function __init__().
- There must be only one module in the system that creates a task
attached to the function __init__().
- The level tries to guarantee that a task uses a "full" timeslice
before going to the queue tail. "full" means that a task can execute
a maximum time of slice+sys_tick due to the approx. done by
the Virtual Machine. If a task execute more time than the slice,
the next time it execute less...
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#ifndef __RR_H__
#define __RR_H__
 
#include <ll/ll.h>
#include <kernel/config.h>
#include <sys/types.h>
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
extern TASK __init__(void *arg);
 
 
 
/*+ Const: +*/
#define RR_MINIMUM_SLICE 1000 /*+ Minimum Timeslice +*/
#define RR_MAXIMUM_SLICE 500000 /*+ Maximum Timeslice +*/
 
#define RR_MAIN_YES 1 /*+ The level creates the main +*/
#define RR_MAIN_NO 0 /*+ The level doesn't create the main +*/
 
/*+ Registration function:
TIME slice the slice for the Round Robin queue
int createmain 1 if the level creates the main task 0 otherwise
struct multiboot_info *mb used if createmain specified
 
returns the level number at which the module has been registered.
+*/
LEVEL RR_register_level(TIME slice,
int createmain,
struct multiboot_info *mb);
__END_DECLS
#endif
/shark/trunk/modules/rr/rr.c
0,0 → 1,347
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: rr.c,v 1.1 2005-02-25 10:45:58 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:45:58 $
------------
 
This file contains the scheduling module RR (Round Robin)
 
Read rr.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <rr/rr/rr.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
//#define RRDEBUG
 
#define rr_printf kern_printf
 
/*+ Status used in the level +*/
#define RR_READY MODULE_STATUS_BASE
 
/*+ the level redefinition for the Round Robin level +*/
typedef struct {
level_des l; /*+ the standard level descriptor +*/
 
IQUEUE ready; /*+ the ready queue +*/
 
int slice; /*+ the level's time slice +*/
 
struct multiboot_info *multiboot; /*+ used if the level have to insert
the main task +*/
} RR_level_des;
 
/* This is not efficient but very fair :-)
   The need of all this stuff is because if a task execute a long time
   due to (shadow!) priority inheritance, then the task shall go to the
   tail of the queue many times...

   Returns the first ready task with residual slice, rotating exhausted
   tasks (after replenishing their slice) to the queue tail; NIL if the
   ready queue is empty. */
static PID RR_public_scheduler(LEVEL l)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);

  PID p;

#ifdef RRDEBUG
  /* BUGFIX: p was printed here before being initialized (undefined
     behavior); the opening marker carries no PID */
  rr_printf("(RRs");
#endif

  for (;;) {
    p = iq_query_first(&lev->ready);

    /* empty ready queue: nothing to schedule */
    if (p == NIL) {
#ifdef RRDEBUG
      rr_printf(" %d)",p);
#endif
      return p;
    }

    /* slice exhausted: replenish it and move the task to the tail,
       then look at the new head */
    if (proc_table[p].avail_time <= 0) {
      proc_table[p].avail_time += proc_table[p].wcet;
      iq_extract(p,&lev->ready);
      iq_insertlast(p,&lev->ready);
    }
    else {
#ifdef RRDEBUG
      rr_printf(" %d)",p);
#endif
      return p;
    }
  }
}
 
/* Accept an NRT task for this level.  The round-robin slice is stored
   in the wcet field so the capacity mechanism (CONTROL_CAP) can charge
   overruns; a per-task slice of 0 selects the level default. */
static int RR_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);
  NRT_TASK_MODEL *nrt;
  int slice;

#ifdef RRDEBUG
  rr_printf("(create %d!!!!)",p);
#endif

  /* only NRT models addressed to this level (or to any level) */
  if (m->pclass != NRT_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;

  nrt = (NRT_TASK_MODEL *)m;

  /* the task state is set at SLEEP by the general task_create;
     here only the capacity fields have to be initialized.
     wcet is (ab)used as the replenishment amount so that a task that
     consumed more than its slice executes less the next time. */
  slice = nrt->slice ? nrt->slice : lev->slice;
  proc_table[p].avail_time = slice;
  proc_table[p].wcet = slice;
  proc_table[p].control |= CONTROL_CAP;

#ifdef RRDEBUG
  rr_printf("(c%d av%d w%d )",p,proc_table[p].avail_time,proc_table[p].wcet);
#endif
  return 0; /* OK */
}
 
/* Dispatch hook: the task state has already been set to EXE by
   scheduler(); here we only remove the task from the ready queue. */
static void RR_public_dispatch(LEVEL l, PID p, int nostop)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);

  /* NB: we can't assume that p is the first task in the queue!!! */
  iq_extract(p, &lev->ready);

#ifdef RRDEBUG
  rr_printf("(dis%d)",p);
#endif
}
 
/* Epilogue: the task is being preempted or its capacity expired.
   An exhausted slice sends the task (replenished) to the queue tail;
   otherwise it keeps the head position to finish its slice. */
static void RR_public_epilogue(LEVEL l, PID p)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);

  /* check if the slice is finished and insert the task in the correct
     queue position */
  if (proc_table[p].avail_time <= 0) {
    proc_table[p].avail_time += proc_table[p].wcet;
    iq_insertlast(p,&lev->ready);
  }
  else
    /* avail_time is >0, so the task may run for another avail_time usec */
    iq_insertfirst(p,&lev->ready);

  proc_table[p].status = RR_READY;

#ifdef RRDEBUG
  rr_printf("(epi%d)",p);
#endif
}
 
/* Task activation: if the task is not SLEEPing it is already active and
   the activation is silently ignored (this module SKIPs activations);
   otherwise the task becomes ready. */
static void RR_public_activate(LEVEL l, PID p, struct timespec *t)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);

  /* Test if we are trying to activate a non sleeping task */
  /* Ignore this; the task is already active */
  if (proc_table[p].status != SLEEP)
    return;

  /* Insert the task at the tail of the ready queue */
  proc_table[p].status = RR_READY;
  iq_insertlast(p,&lev->ready);

#ifdef RRDEBUG
  rr_printf("(act%d)",p);
#endif

}
 
/* Unblock hook: similar to RR_public_activate, but we don't check in
   what state the task is — it simply becomes ready again. */
static void RR_public_unblock(LEVEL l, PID p)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);

  /* Insert the task at the tail of the ready queue */
  proc_table[p].status = RR_READY;
  iq_insertlast(p,&lev->ready);

#ifdef RRDEBUG
  rr_printf("(ubl%d)",p);
#endif
}
 
/* Block hook: intentionally empty. */
static void RR_public_block(LEVEL l, PID p)
{
  /* Extract the running task from the level:
     . we have already extracted it from the ready queue at dispatch time.
     . the capacity event has to be removed by the generic kernel
     . the wcet doesn't need modification...
     . the state of the task is set by the calling function

     So, we do nothing!!!
  */
#ifdef RRDEBUG
  rr_printf("(bl%d)",p);
#endif
}
 
/* End-of-instance message (task_endcycle/task_message): the task goes
   to sleep until the next activation; the message payload is ignored. */
static int RR_public_message(LEVEL l, PID p, void *m)
{
  proc_table[p].status = SLEEP;

  jet_update_endcycle(); /* Update the Jet data... */
  TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l); /* tracer stuff */

#ifdef RRDEBUG
  rr_printf("(msg%d)",p);
#endif
  return 0;
}
 
/* Task termination: the process descriptor is returned to the global
   free queue (the task was already out of the ready queue — it was
   extracted at dispatch time). */
static void RR_public_end(LEVEL l, PID p)
{
  /* we insert the task in the free queue */
  proc_table[p].status = FREE;
  iq_insertlast(p,&freedesc);

#ifdef RRDEBUG
  rr_printf("(end%d)",p);
#endif
}
 
/* Registration functions */
 
/*+ This init function installs the "main" task, i.e. a NRT task
    attached to __init__() and activated at system startup. +*/
static void RR_call_main(void *l)
{
  LEVEL lev;
  PID p;
  NRT_TASK_MODEL m;
  void *mb;

  lev = (LEVEL)l;

  nrt_task_default_model(m);
  nrt_task_def_level(m,lev); /* with this we are sure that the task arrives
                                to the correct level */

  mb = ((RR_level_des *)level_table[lev])->multiboot;
  nrt_task_def_arg(m,mb);
  nrt_task_def_usemath(m);
  nrt_task_def_nokill(m);
  nrt_task_def_ctrl_jet(m);
  nrt_task_def_stack(m,30000);

  p = task_create("Main", __init__, (TASK_MODEL *)&m, NULL);

  if (p == NIL) {
    /* BUGFIX: do not fall through to RR_public_activate with p == NIL,
       that would index proc_table[-1] */
    printk(KERN_EMERG "Panic!!! can't create main task... errno =%d\n",errno);
    return;
  }

  RR_public_activate(lev,p,NULL);

#ifdef RRDEBUG
  rr_printf("(main created %d)",p);
#endif
}
 
 
/*+ Registration function:
    TIME slice the slice for the Round Robin queue
    int createmain 1 if the level creates the main task 0 otherwise
    struct multiboot_info *mb used if createmain specified

    returns the level number at which the module has been registered. +*/
LEVEL RR_register_level(TIME slice,
                        int createmain,
                        struct multiboot_info *mb)
{
  LEVEL l;           /* the level that we register */
  RR_level_des *lev; /* shortcut to the level descriptor */

  printk("RR_register_level\n");

  /* allocate and fetch the level descriptor */
  l = level_alloc_descriptor(sizeof(RR_level_des));
  lev = (RR_level_des *)level_table[l];

  /* hook the public scheduling interface */
  lev->l.public_scheduler = RR_public_scheduler;
  lev->l.public_create    = RR_public_create;
  lev->l.public_end       = RR_public_end;
  lev->l.public_dispatch  = RR_public_dispatch;
  lev->l.public_epilogue  = RR_public_epilogue;
  lev->l.public_activate  = RR_public_activate;
  lev->l.public_unblock   = RR_public_unblock;
  lev->l.public_block     = RR_public_block;
  lev->l.public_message   = RR_public_message;

  /* RR private part: empty ready queue and default slice clamped
     into [RR_MINIMUM_SLICE, RR_MAXIMUM_SLICE] */
  iq_init(&lev->ready, &freedesc, 0);

  if (slice < RR_MINIMUM_SLICE)
    slice = RR_MINIMUM_SLICE;
  else if (slice > RR_MAXIMUM_SLICE)
    slice = RR_MAXIMUM_SLICE;
  lev->slice = slice;

  lev->multiboot = mb;

  /* optionally schedule the creation of the "Main" task at startup */
  if (createmain)
    sys_atrunlevel(RR_call_main,(void *) l, RUNLEVEL_INIT);

  return l;
}
/shark/trunk/modules/rr/subdir.mk
0,0 → 1,0
OBJS += rr/rr.o
/shark/trunk/modules/sem/sem/sem.h
0,0 → 1,105
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: sem.h,v 1.1 2005-02-25 10:50:43 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:50:43 $
------------
 
This file contains the semaphoric primitives
 
Title:
HARTSEM (Hartik Semaphores)
 
Resource Models Accepted:
None
 
Description:
This module contains a semaphore library compatible with Posix, Plus
an extension to permit post and wait with counters > 1
 
Exceptions raised:
None
 
Restrictions & special features:
- a function isBlocked is provided
- the named semaphores are NOT implemented with a filesystem
- the system supports up to _POSIX_SEM_NSEMS_MAX defined in limits.h
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
#ifndef __MODULES_SEM_H__
#define __MODULES_SEM_H__
 
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
#define SEM_FAILED NULL
typedef int sem_t;
 
/*+ This function must be inserted in the __hartik_register_levels__ +*/
void SEM_register_module(void);
 
int sem_close(sem_t *sem);
int sem_destroy(sem_t *sem);
int sem_getvalue(sem_t *sem, int *sval);
int sem_init(sem_t *sem, int pshared, unsigned int value);
sem_t *sem_open(const char *name, int oflag, ...);
int sem_post(sem_t *sem);
int sem_trywait(sem_t *sem);
int sem_unlink(const char *name);
int sem_wait(sem_t *sem);
 
 
 
int sem_xpost(sem_t *sem, int n);
int sem_xwait(sem_t *sem, int n, int wait);
 
 
int isBlocked(PID i);
 
__END_DECLS
#endif
/shark/trunk/modules/sem/sem.c
0,0 → 1,706
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: sem.c,v 1.1 2005-02-25 10:50:43 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:50:43 $
------------
 
This file contains the Hartik 3.3.1 Semaphore functions
 
Author: Giuseppe Lipari
 
Semaphores:
this is the generalized version of the primitives signal & wait
In this case, the user can specify the number to inc/dec the
semaphore's counter. It is useful in the buffer management
(see port section)
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <stdarg.h>
#include <sem/sem/sem.h>
#include <kernel/config.h>
#include <ll/ll.h>
#include <ll/string.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <limits.h>
#include <fcntl.h>
 
#include <tracer.h>
 
/* Semaphores descriptor tables */
static struct sem_des {
char *name; /* a name, for named semaphores (NULL for unnamed ones) */
int index; /* an index for sem_open, containing the sem number */
int count; /* the semaphore counter */
IQUEUE blocked; /* the blocked processes queue */
int next; /* free-list link to the next free descriptor */
BYTE used; /* 1 if the semaphore is used */
} sem_table[SEM_NSEMS_MAX];


/* this -IS- an extension to the proc_table!!! */
static struct {
int decsem; /* the value required in sem_xwait */
int sem; /* the semaphore on which the process is blocked */
} sp_table[MAX_PROC];

static int free_sem; /* Queue of free sem (head of the free list) */
 
 
 
/*----------------------------------------------------------------------*/
/* Cancellation test for semaphores */
/*----------------------------------------------------------------------*/
 
/* this is the test that is done when a task is being killed
   and it is waiting on a sem_wait */
static int semwait_cancellation_point(PID i, void *arg)
{
  LEVEL l;

  if (proc_table[i].status == WAIT_SEM) {
    /* the task that has to be killed is waiting on a sem_wait.
       we reset the data structures set in sem_wait and then, when the
       task returns in sem_wait, it will fall into a task_testcancel */

    /* extract the process from the semaphore queue... */
    iq_extract(i,&sem_table[ sp_table[i].sem ].blocked);

    /* ...and hand it back to its scheduling level as runnable */
    l = proc_table[i].task_level;
    level_table[l]->public_unblock(l,i);

    return 1;   /* handled: the task was blocked on a semaphore */
  }

  return 0;     /* not blocked on a semaphore: nothing to do */
}
 
/*----------------------------------------------------------------------*/
/* Init the semaphoric structures */
/*----------------------------------------------------------------------*/
void SEM_register_module(void)
{
int i;
 
for (i = 0; i < SEM_NSEMS_MAX; i++) {
sem_table[i].name = NULL;
sem_table[i].index = i;
sem_table[i].count = 0;
iq_init(&sem_table[i].blocked, &freedesc, 0);
sem_table[i].next = i+1;
sem_table[i].used = 0;
}
sem_table[SEM_NSEMS_MAX-1].next = NIL;
free_sem = 0;
 
register_cancellation_point(semwait_cancellation_point, NULL);
}
 
/*----------------------------------------------------------------------*/
/* Allocate a semaphoric descriptor and sets the counter to value */
/*----------------------------------------------------------------------*/

// the pshared parameter is NRQ for PSE52
/* Returns 0 on success; on error returns -1 and sets errno (EINVAL for
   an out-of-range initial value, ENOSPC when no descriptor is free).
   BUGFIX: the original returned EINVAL directly instead of the POSIX
   -1/errno convention used by every other function in this file. */
int sem_init(sem_t *sem, int pshared, unsigned int value)
{
  SYS_FLAGS f;

  if (value > SEM_VALUE_MAX) {
    errno = EINVAL;
    return -1;
  }

  f = kern_fsave();
  *sem = free_sem;
  if (*sem != NIL) {
    /* pop a descriptor from the free list and initialize it as an
       unnamed semaphore */
    free_sem = sem_table[*sem].next;
    sem_table[*sem].name = NULL;
    sem_table[*sem].count = value;
    iq_init(&sem_table[*sem].blocked, &freedesc, 0);
    sem_table[*sem].used = 1;
  }
  else {
    errno = ENOSPC;
    kern_frestore(f);
    return -1;
  }
  kern_frestore(f);
  return 0;
}
 
/*----------------------------------------------------------------------*/
/* Frees a semaphore descriptor */
/*----------------------------------------------------------------------*/
int sem_destroy(sem_t *sem)
{
  SYS_FLAGS f;
  f = kern_fsave();

  /* validate the descriptor index and that it is in use */
  if (*sem < 0 || *sem >= SEM_NSEMS_MAX || !sem_table[*sem].used) {
    errno = EINVAL;
    kern_frestore(f);
    return -1;
  }

  /* destroying a semaphore with blocked tasks is an error */
  if (sem_table[*sem].blocked.first != NIL) {
    errno = EBUSY;
    kern_frestore(f);
    return -1;
  }

  /* return the descriptor to the free list */
  sem_table[*sem].used = 0;
  sem_table[*sem].next = free_sem;
  free_sem = *sem;

  kern_frestore(f);
  return 0;
}
 
/*----------------------------------------------------------------------*/
/* Allocate a named semaphore */
/*----------------------------------------------------------------------*/
 
// the pshared parameter is NRQ for PSE52
sem_t *sem_open(const char *name, int oflag, ...)
{
int i, j;
int found = 0;
mode_t m;
sem_t sem;
SYS_FLAGS f;
 
f = kern_fsave();
 
for (i = 0; i < SEM_NSEMS_MAX; i++)
if (sem_table[i].used) {
if (strcmp(name, sem_table[i].name) == 0) {
found = 1;
break;
}
}
if (found) {
if (oflag == (O_CREAT | O_EXCL)) {
errno = EEXIST;
kern_frestore(f);
return SEM_FAILED;
} else {
kern_frestore(f);
return &sem_table[i].index;
}
} else {
if (!(oflag & O_CREAT)) {
errno = ENOENT;
kern_frestore(f);
return SEM_FAILED;
} else {
va_list l;
 
va_start(l, oflag);
m = va_arg(l,mode_t);
j = va_arg(l, int);
va_end(l);
 
if (j > SEM_VALUE_MAX) {
errno = EINVAL;
kern_frestore(f);
return SEM_FAILED;
}
 
sem = free_sem;
if (sem != -1) {
free_sem = sem_table[sem].next;
sem_table[sem].name = kern_alloc(strlen((char *)name)+1);
strcpy(sem_table[sem].name, (char *)name);
sem_table[sem].count = j;
iq_init(&sem_table[sem].blocked, &freedesc, 0);
sem_table[sem].used = 1;
kern_frestore(f);
return &sem_table[sem].index;
}
else {
errno = ENOSPC;
kern_frestore(f);
return SEM_FAILED;
}
}
}
}
 
/*----------------------------------------------------------------------*/
/* Frees a named semaphore */
/*----------------------------------------------------------------------*/
/* Returns 0 on success, -1 with errno = EINVAL on a bad descriptor. */
int sem_close(sem_t *sem)
{
  SYS_FLAGS f;

  f = kern_fsave();

  if (*sem < 0 || *sem >= SEM_NSEMS_MAX || !sem_table[*sem].used) {
    errno = EINVAL;
    kern_frestore(f);
    return -1;
  }

  /* why not???
  if (sem_table[*sem].q_first != -1) {
    errno = EBUSY;
    kern_sti();
    return -1;
  } */

  /* BUGFIX: only named semaphores own a name string; calling sem_close
     on an unnamed semaphore (name == NULL) must not do strlen(NULL) */
  if (sem_table[*sem].name != NULL) {
    kern_free(sem_table[*sem].name,strlen(sem_table[*sem].name)+1);
    sem_table[*sem].name = NULL;
  }
  sem_table[*sem].used = 0;
  sem_table[*sem].next = free_sem;
  free_sem = *sem;

  kern_frestore(f);
  return 0;
}
 
/*----------------------------------------------------------------------*/
/* Unlink a named semaphore */
/*----------------------------------------------------------------------*/
int sem_unlink(const char *name)
{
int i;
int found = 0;
SYS_FLAGS f;
 
f = kern_fsave();
 
for (i = 0; i < SEM_NSEMS_MAX; i++)
if (sem_table[i].used) {
if (strcmp(name, sem_table[i].name) == 0) {
found = 1;
}
}
 
if (found) {
kern_free(sem_table[i].name,strlen((char *)name)+1);
sem_table[i].used = 0;
sem_table[i].next = free_sem;
free_sem = i;
kern_frestore(f);
return 0;
} else {
errno = ENOENT;
kern_frestore(f);
return SEM_FAILED;
}
}
 
/*----------------------------------------------------------------------*/
/* Generic wait. If it is possible, decrements the sem counter of 1, */
/* else blocks the task. */
/*----------------------------------------------------------------------*/
int sem_wait(sem_t *s)
{
  struct sem_des *s1; /* It speeds up access */

  if (*s < 0 || *s >= SEM_NSEMS_MAX || !sem_table[*s].used) {
    errno = EINVAL;
    return -1;
  }

  /* sem_wait is a cancellation point: honor a pending kill request
     before touching any kernel state */
  task_testcancel();

  proc_table[exec_shadow].context = kern_context_save();

  s1 = &sem_table[*s];

  /* block also when other tasks are already queued, to preserve the
     FIFO ordering of the semaphore queue */
  if (s1->blocked.first != NIL || s1->count == 0) {
    /* We must block exec task */
    LEVEL l; /* for readableness only */

    /* tracer stuff */
    TRACER_LOGEVENT(FTrace_EVT_set_mutex_wait,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)*s);

    kern_epilogue_macro();

    /* tell the task's scheduling level that it is blocking */
    l = proc_table[exec_shadow].task_level;
    level_table[l]->public_block(l,exec_shadow);

    /* we insert the task in the semaphore queue */
    proc_table[exec_shadow].status = WAIT_SEM;

    /* Prepare sem_table des... a plain wait consumes one unit */
    sp_table[exec_shadow].decsem = 1;
    sp_table[exec_shadow].sem = *s;

    /* ...and put it in sem queue */
    iq_insertlast(exec_shadow,&s1->blocked);

    /* and finally we reschedule */
    exec = exec_shadow = -1;
    scheduler();
    kern_context_load(proc_table[exec_shadow].context);

    /* sem_wait is a cancellation point... */
    task_testcancel();
  }
  else {
    /* fast path: take one unit and continue */
    s1->count--;
    /* tracer stuff */
    TRACER_LOGEVENT(FTrace_EVT_set_mutex_wait,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)*s);
    kern_context_load(proc_table[exec_shadow].context);
  }

  return 0;
}
 
/*----------------------------------------------------------------------*/
/* Non-blocking wait */
/*----------------------------------------------------------------------*/
int sem_trywait(sem_t *s)
{
  struct sem_des *sd; /* shortcut to the descriptor */
  SYS_FLAGS f;
  int ret = 0;

  if (*s < 0 || *s >= SEM_NSEMS_MAX || !sem_table[*s].used) {
    errno = EINVAL;
    return -1;
  }

  f = kern_fsave();

  sd = &sem_table[*s];

  /* fail without blocking when other tasks are already queued (FIFO
     order must be preserved) or the counter is exhausted; otherwise
     take one unit */
  if (sd->blocked.first != NIL || sd->count == 0) {
    errno = EAGAIN;
    ret = -1;
  }
  else
    sd->count--;

  kern_frestore(f);
  return ret;
}
 
 
/*----------------------------------------------------------------------*/
/* Generic wait. If it is possible, decrements the sem counter of n, */
/* else blocks the task. */
/*----------------------------------------------------------------------*/
int sem_xwait(sem_t *s, int n, int wait)
{
  struct sem_des *s1; /* It speeds up access */

  if (*s < 0 || *s >= SEM_NSEMS_MAX || !sem_table[*s].used) {
    errno = EINVAL;
    return -1;
  }

  /* We do not need to save context if we are sure we shall not block! */
  if (wait == NON_BLOCK)
    kern_cli();
  else
    proc_table[exec_shadow].context = kern_context_save();

  s1 = &sem_table[*s];

  /* The non blocking wait is really simple! */
  /* We do not suspend or schedule anything */
  if (wait == NON_BLOCK) {
    /* fail when tasks are already queued (FIFO) or fewer than n units
       are available */
    if (s1->blocked.first != NIL || s1->count < n) {
      errno = EAGAIN;
      kern_sti();
      return -1;
    }
    else
      s1->count -= n;

    kern_sti();
    return 0;
  }
  /* The blocking wait is more complex... */
  else {
    /* the blocking wait is a cancellation point */
    task_testcancel();

    if (s1->blocked.first != NIL || s1->count < n) {
      /* We must block exec task */
      LEVEL l; /* for readableness only */

      /* tracer */
      TRACER_LOGEVENT(FTrace_EVT_set_mutex_wait,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)*s);
      kern_epilogue_macro();
      l = proc_table[exec_shadow].task_level;
      level_table[l]->public_block(l,exec_shadow);
      /* we insert the task in the semaphore queue */
      proc_table[exec_shadow].status = WAIT_SEM;
      /* Prepare sem_table des...: record how many units this task
         needs, so sem_post/sem_xpost know when it can be woken up */
      sp_table[exec_shadow].decsem = n;
      sp_table[exec_shadow].sem = *s;
      /* ...and put it in sem queue */
      iq_insertlast(exec_shadow,&s1->blocked);
      /* and finally we reschedule */
      exec = exec_shadow = -1;
      scheduler();
      kern_context_load(proc_table[exec_shadow].context);
      /* sem_xwait is a cancellation point... */
      task_testcancel();
    }
    else {
      /* fast path: take n units and continue */
      s1->count -= n;
      /* tracer */
      TRACER_LOGEVENT(FTrace_EVT_set_mutex_wait,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)*s);
      kern_context_load(proc_table[exec_shadow].context);
    }
  }

  return 0;
}
 
/*----------------------------------------------------------------------*/
/* Generic signal. It increments the sem counter of 1, and wakes one */
/* of the tasks that are blocked on the semaphore, if it is possible.The*/
/* semaphoric queue is a FIFO queue, in order to eliminate deadlocks */
/*----------------------------------------------------------------------*/
int sem_post(sem_t *s)
{
  struct sem_des *s1; /* it speeds up access */
  int p; /* idem */
  LEVEL l;

  if (*s < 0 || *s >= SEM_NSEMS_MAX || !sem_table[*s].used) {
    errno = EINVAL;
    return -1;
  }

  // ugly patch to allow calling sem_post from an interrupt handler!!!
  if (ll_ActiveInt()) {
    /* interrupt context: no context switch here; just flag the need
       for a reschedule via event_need_reschedule() */
    SYS_FLAGS f;
    f = kern_fsave();
    s1 = &sem_table[*s];
    s1->count ++; /* inc sem count */

    /* wake the head waiter only if its request (decsem) is satisfied */
    p = s1->blocked.first;
    if (p != NIL && sp_table[p].decsem <= s1->count) {
      /* Dec sem count */
      s1->count -= sp_table[p].decsem;
      /* Get task from blocked queue */
      iq_extract(p,&s1->blocked);
      l = proc_table[p].task_level;
      level_table[l]->public_unblock(l,p);
      /* only a task can be awaken */
      /* Preempt if necessary */
      event_need_reschedule();
    }
    /* tracer */
    TRACER_LOGEVENT(FTrace_EVT_set_mutex_lock,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)*s);
    kern_frestore(f);
  }
  else {
    /* task context: full context save / reschedule / context load */
    proc_table[exec].context = kern_context_save();
    s1 = &sem_table[*s];
    s1->count ++; /* inc sem count */
    p = s1->blocked.first;
    if (p != NIL && sp_table[p].decsem <= s1->count) {
      /* Dec sem count */
      s1->count -= sp_table[p].decsem;
      /* Get task from blocked queue */
      iq_extract(p,&s1->blocked);
      l = proc_table[p].task_level;
      level_table[l]->public_unblock(l,p);
      /* only a task can be awaken */
      /* Preempt if necessary */
      scheduler();
    }
    /* tracer */
    TRACER_LOGEVENT(FTrace_EVT_set_mutex_lock,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)*s);
    kern_context_load(proc_table[exec_shadow].context);
  }

  return 0;
}
 
/*----------------------------------------------------------------------*/
/* Generic signal. It increments the sem counter of n, and wakes all the*/
/* tasks that are blocked on the semaphore, if it is possible. The */
/* semaphoric queue is a FIFO queue, in order to eliminate deadlocks */
/*----------------------------------------------------------------------*/
int sem_xpost(sem_t *s, int n)
{
  struct sem_des *s1; /* it speeds up access */
  int p; /* idem */
  int fl = 0; /* set when at least one task has been woken up */
  LEVEL l;

  if (*s < 0 || *s >= SEM_NSEMS_MAX || !sem_table[*s].used) {
    errno = EINVAL;
    return -1;
  }

  // ugly patch to allow calling sem_xpost from an interrupt handler!!!
  if (ll_ActiveInt()) {
    /* interrupt context: no context switch here; just flag the need
       for a reschedule via event_need_reschedule() */
    SYS_FLAGS f;
    f = kern_fsave();
    s1 = &sem_table[*s];
    s1->count += n; /* inc sem count */
    /* wake, in FIFO order, as many waiters as the new count allows */
    p = s1->blocked.first;
    while (p != NIL && sp_table[p].decsem <= s1->count) {
      /* Dec sem count */
      s1->count -= sp_table[p].decsem;
      /* Get task from blocked queue */
      iq_extract(p,&s1->blocked);
      l = proc_table[p].task_level;
      level_table[l]->public_unblock(l,p);
      /* Next task to wake */
      p = s1->blocked.first;
      fl = 1;
    }
    /* tracer */
    TRACER_LOGEVENT(FTrace_EVT_set_mutex_lock,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)*s);
    /* Preempt if necessary */
    if (fl) event_need_reschedule();
    kern_frestore(f);
  }
  else {
    /* task context: full context save / reschedule / context load */
    proc_table[exec].context = kern_context_save();
    s1 = &sem_table[*s];
    s1->count += n; /* inc sem count */
    p = s1->blocked.first;
    while (p != NIL && sp_table[p].decsem <= s1->count) {
      /* Dec sem count */
      s1->count -= sp_table[p].decsem;
      /* Get task from blocked queue */
      iq_extract(p,&s1->blocked);
      l = proc_table[p].task_level;
      level_table[l]->public_unblock(l,p);
      /* Next task to wake */
      p = s1->blocked.first;
      fl = 1;
    }
    /* tracer */
    TRACER_LOGEVENT(FTrace_EVT_set_mutex_lock,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)*s);
    /* Preempt if necessary */
    if (fl) scheduler();
    kern_context_load(proc_table[exec_shadow].context);
  }

  return 0;
}
 
/*----------------------------------------------------------------------*/
/* Getvalue returns the value of the semaphore (>=0). If some tasks are */
/* blocked on the semaphore, it returns the number of blocked tasks, */
/* negated (<0) */
/* Stores into *sval the semaphore counter (>= 0) when no task is
   waiting, or minus the number of waiting tasks (< 0) otherwise.
   Returns 0 on success, -1 with errno = EINVAL on a bad semaphore. */
int sem_getvalue(sem_t *sem, int *sval)
{
  struct sem_des *sd;
  PID waiter;
  SYS_FLAGS flags;

  if (*sem < 0 || *sem >= SEM_NSEMS_MAX || !sem_table[*sem].used) {
    errno = EINVAL;
    return -1;
  }

  flags = kern_fsave();
  sd = &sem_table[*sem];

  if (!iq_isempty(&sd->blocked)) {
    /* the sem is busy: one unit below zero for each waiting task */
    *sval = 0;
    for (waiter = iq_query_first(&sd->blocked);
         waiter != NIL;
         waiter = iq_query_next(waiter, &sd->blocked))
      (*sval)--;
  } else {
    /* the sem is free: report the current counter */
    *sval = sd->count;
  }

  kern_frestore(flags);
  return 0;
}
 
 
/*----------------------------------------------------------------------*/
/* this function returns 1 if the task is blocked on a semaphore */
/*----------------------------------------------------------------------*/
/* Returns 1 if task i is currently blocked on a semaphore, 0 otherwise. */
int isBlocked(PID i)
{
  return (proc_table[i].status == WAIT_SEM) ? 1 : 0;
}
 
/shark/trunk/modules/sem/subdir.mk
0,0 → 1,0
OBJS += sem/sem.o
/shark/trunk/modules/elastic/elastic/elastic.h
0,0 → 1,78
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
/* Public interface of the ELASTIC scheduling module: tasks declare a
   computation time C, a period range [Tmin,Tmax] and an elasticity E;
   the module compresses task utilizations to fit the reserved bandwidth. */
#ifndef __ELASTIC_H__
#define __ELASTIC_H__

#include <kernel/config.h>
#include <kernel/types.h>

__BEGIN_DECLS

/*+ flags... +*/
#define ELASTIC_DISABLE_ALL 0 /*+ Task Guarantee disabled +*/
#define ELASTIC_ENABLE_GUARANTEE 1 /*+ Task Guarantee enabled +*/
#define ELASTIC_ENABLE_ALL 1

/* C Scaling factor define: fixed-point unit of c_scaling_factor */
#define SCALING_UNIT 10000

/*+ Registration: flags, master (scheduling) level, reserved bandwidth +*/
LEVEL ELASTIC_register_level(int flags, LEVEL master, ext_bandwidth_t U);

/* Setters return 0 on success, -1 on failure (task not elastic, or the
   resulting task set is not feasible); getters return -1 on failure. */
int ELASTIC_set_period(PID p, TIME period);
int ELASTIC_get_period(PID p);

int ELASTIC_set_Tmin(PID p, TIME Tmin);
int ELASTIC_get_Tmin(PID p);

int ELASTIC_set_Tmax(PID p, TIME Tmax);
int ELASTIC_get_Tmax(PID p);

int ELASTIC_set_C(PID p, TIME C);
int ELASTIC_get_C(PID p);

int ELASTIC_set_E(PID p, int E);
int ELASTIC_get_E(PID p);

int ELASTIC_set_beta(PID p, int beta);
int ELASTIC_get_beta(PID p);

int ELASTIC_set_bandwidth(LEVEL lev, ext_bandwidth_t);
ext_bandwidth_t ELASTIC_get_bandwidth(LEVEL lev);

int ELASTIC_set_scaling_factor(LEVEL level, int scaling_factor);
int ELASTIC_get_scaling_factor(LEVEL level);

__END_DECLS
#endif
/shark/trunk/modules/elastic/elastic.c
0,0 → 1,1021
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors:
* Giacomo Guidi <giacomo@gandalf.sssup.it>
* Mauro Marinoni
* Anton Cervin
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <ll/i386/64bit.h>
 
#include <stdlib.h>
 
#include <elastic/elastic/elastic.h>
 
#include <tracer.h>
 
/* Task flags */
 
#define ELASTIC_PRESENT 1
#define ELASTIC_JOB_PRESENT 2
 
/* Task statuses */
 
#define ELASTIC_IDLE APER_STATUS_BASE
 
//#define ELASTIC_DEBUG
 
#ifdef ELASTIC_DEBUG
/* Debug helpers: format a time value as "sec.usec" into a static buffer.
   pnow() prints the current kernel time; ptime1()/ptime2() are identical
   on purpose, each with its own static buffer, so both can appear in a
   single printf call without clobbering each other. */
char *pnow() {
  static char buf[40];
  struct timespec t;
  kern_gettime(&t);
  sprintf(buf, "%ld.%06ld", t.tv_sec, t.tv_nsec/1000);
  return buf;
}
char *ptime1(struct timespec *t) {
  static char buf[40];
  sprintf(buf, "%ld.%06ld", t->tv_sec, t->tv_nsec/1000);
  return buf;
}
char *ptime2(struct timespec *t) {
  static char buf[40];
  sprintf(buf, "%ld.%06ld", t->tv_sec, t->tv_nsec/1000);
  return buf;
}
#endif
 
 
/* Per-task descriptor of the elastic level (one entry per PID). */
typedef struct {

  /* Task parameters (set/changed by the user) */

  TIME Tmin;       /* The nominal (minimum) period */
  TIME Tmax;       /* The maximum tolerable period */
  TIME C;          /* The declared worst-case execution time */
  int E;           /* The elasticity coefficient */
  int beta;        /* PERIOD_SCALING or WCET_SCALING */

  /* Task variables (changed by the module) */

  struct timespec release;  /* The current activation time */
  struct timespec dline;    /* The current absolute deadline */
  int dltimer;              /* Deadline timer handle (-1 = none pending) */
  ext_bandwidth_t Umax;     /* The maximum utilization, Umax = C/Tmin */
  ext_bandwidth_t Umin;     /* The minimum utilization, Umin = C/Tmax */

  ext_bandwidth_t U;        /* New assigned utilization */
  ext_bandwidth_t oldU;     /* Old utilization (to detect compression) */
  TIME T;                   /* The current period, T = C/U */

  int flags;                /* ELASTIC_PRESENT | ELASTIC_JOB_PRESENT */

} ELASTIC_task_descr;
 
/* Level descriptor of the elastic scheduling module. */
typedef struct {
  level_des l;     /*+ the standard level descriptor +*/

  ext_bandwidth_t U;   /*+ the bandwidth reserved for elastic tasks +*/

  int c_scaling_factor;  /*+ the computation time scaling factor
                             (fixed point, SCALING_UNIT = identity) +*/

  ELASTIC_task_descr elist[MAX_PROC];  /* per-task descriptors */

  LEVEL scheduling_level;  /* master level where jobs are inserted */

  LEVEL current_level;     /* this level's own index */

  int flags;

} ELASTIC_level_des;
 
 
/* Releases a new job of task p at time *acttime: sets the release time
   and the absolute deadline (acttime + current period T), recharges the
   budget with the scaled computation time, and inserts the job into the
   master (scheduling) level. */
static void ELASTIC_activation(ELASTIC_level_des *lev, PID p,
			       struct timespec *acttime)
{
  JOB_TASK_MODEL job;
  ELASTIC_task_descr *et = &lev->elist[p];

  /* Assign release time */
  et->release = *acttime;

  /* Assign absolute deadline = release + current period */
  et->dline = *acttime;
  ADDUSEC2TIMESPEC(et->T, &et->dline);

#ifdef ELASTIC_DEBUG
  /* cprintf("At %s: activating %s; rel=%s; dl=%s\n", pnow(), proc_table[p].name,
     ptime1(&et->release), ptime2(&et->dline)); */
#endif

  /* Recharge budget and wcet with C scaled by c_scaling_factor */
  mul32div32to32(et->C,lev->c_scaling_factor,SCALING_UNIT,proc_table[p].avail_time);
  mul32div32to32(et->C,lev->c_scaling_factor,SCALING_UNIT,proc_table[p].wcet);

  /* Job insertion into the master level */
  job_task_default_model(job, et->dline);
  level_table[lev->scheduling_level]->
    private_insert(lev->scheduling_level, p, (TASK_MODEL *)&job);
  et->flags |= ELASTIC_JOB_PRESENT;
}
 
 
/* Deadline-timer handler: fires at the task's current deadline, releases
   the next job using the old deadline as the new activation time, and
   re-posts itself at the new deadline computed by ELASTIC_activation(). */
static void ELASTIC_timer_act(void *arg) {

  PID p = (PID)(arg);
  ELASTIC_level_des *lev;
  lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  ELASTIC_task_descr *et = &lev->elist[p];

  /* Use the current deadline as the new activation time */
  ELASTIC_activation(lev, p, &et->dline);

  event_need_reschedule();

  /* Next activation (et->dline was advanced by ELASTIC_activation) */
  et->dltimer = kern_event_post(&et->dline, ELASTIC_timer_act, (void *)(p));
}
 
 
/* Check feasability and compute new utilizations for the task set */
 
/* Elastic compression: checks whether the task set fits the reserved
   bandwidth lev->U and, if not, shrinks the utilization of every elastic
   task (E > 0) proportionally to its elasticity, saturating tasks at
   their Umin and iterating until a fixed point.  Tasks whose utilization
   decreased get their period and deadline updated immediately; the
   others are updated at their next activation.
   Returns 0 if feasible, -1 if even the minimum utilizations overflow. */
static int ELASTIC_compress(ELASTIC_level_des *lev) {

  PID i;
  ELASTIC_task_descr *et;
  int ok;
  ext_bandwidth_t Umin;  // minimum utilization
  ext_bandwidth_t Umax;  // nominal (maximum) utilization of compressable tasks
  unsigned int temp;

  ext_bandwidth_t Uf;    // amount of non-compressable utilization
  int Ev;                // sum of elasticity among compressable tasks

  JOB_TASK_MODEL job;

  Umin = 0;
  Umax = 0;

  /* total bounds: rigid tasks (E == 0) contribute their fixed U */
  for (i=0; i<MAX_PROC; i++) {
    et = &lev->elist[i];
    if (et->flags & ELASTIC_PRESENT) {
      if (et->E == 0) {
        Umin += et->U;
        Umax += et->U;
      } else {
        Umin += et->Umin;
        Umax += et->Umax;
        et->U = et->Umax;  // reset previous saturations (if any)
      }
    }
  }

  if (Umin > lev->U) {
#ifdef ELASTIC_DEBUG
    cprintf("ELASTIC_compress: Task set not feasible\n");
#endif
    return -1;  // NOT FEASIBLE
  }

  if (Umax <= lev->U) {
#ifdef ELASTIC_DEBUG
    cprintf("ELASTIC_compress: Task set feasible with maximum utilizations\n");
#endif

  } else {

    /* iterative compression: tasks saturated at Umin become fixed (Uf)
       and the remaining excess is redistributed among the others */
    do {
      Uf = 0;
      Ev = 0;
      Umax = 0;
      for (i=0; i<MAX_PROC; i++) {
        et = &lev->elist[i];
        if (et->flags & ELASTIC_PRESENT) {
          if (et->E == 0 || et->U == et->Umin) {
            Uf += et->U;
          } else {
            Ev += et->E;
            Umax += et->Umax;
          }
        }
      }
      ok = 1;
      for (i=0; i<MAX_PROC; i++) {
        et = &lev->elist[i];
        if (et->flags & ELASTIC_PRESENT) {
          if (et->E > 0 && et->U > et->Umin) {
            /* NOTE(review): Ev == 0 here would divide by zero; this can
               only happen if every elastic task is already saturated,
               which the feasibility test above should exclude — verify */
            et->U = et->Umax - (Umax - lev->U + Uf) * et->E / Ev;
            if (et->U < et->Umin) {
              et->U = et->Umin;   /* saturate and iterate again */
              ok = 0;
            }
          }
        }
      }
    } while (ok == 0);
  }

  // Increase periods of compressed tasks IMMEDIATELY.
  // The other ones will be changed at their next activation

  for (i=0; i<MAX_PROC; i++) {
    et = &lev->elist[i];
    if (et->flags & ELASTIC_PRESENT) {
      if (et->U != et->oldU) {
        /* Utilization has been changed. Compute new period T = C/U */
        temp = (long long)et->C * (long long)MAX_BANDWIDTH / et->U;
        mul32div32to32(temp,lev->c_scaling_factor,SCALING_UNIT,et->T);
      }
      if (et->U < et->oldU) {
        /* Task has been compressed. Change its deadline NOW! */
        if (et->flags & ELASTIC_JOB_PRESENT) {
          /* Remove job from level */
          level_table[lev->scheduling_level]->
            private_extract(lev->scheduling_level, i);
        }
        /* Compute new deadline = release + new (longer) period */
        et->dline = et->release;
        ADDUSEC2TIMESPEC(et->T, &et->dline);
        if (et->dltimer != -1) {
          /* Delete old deadline timer, post new one */
          kern_event_delete(et->dltimer);
          et->dltimer = kern_event_post(&et->dline, ELASTIC_timer_act,(void *)(i));
        }
        if (et->flags & ELASTIC_JOB_PRESENT) {
          /* Reinsert job with the postponed deadline */
          job_task_default_model(job, et->dline);
          level_table[lev->scheduling_level]->
            private_insert(lev->scheduling_level, i, (TASK_MODEL *)&job);
        }
      }
      et->oldU = et->U;  /* Update oldU */
    }
  }

#ifdef ELASTIC_DEBUG
  cprintf("ELASTIC_compress: New periods: ");
  for (i=0; i<MAX_PROC; i++) {
    et = &lev->elist[i];
    if (et->flags & ELASTIC_PRESENT) {
      cprintf("%s:%d ", proc_table[i].name, (int)et->T);
    }
  }
  cprintf("\n");
#endif

  return 0;  // FEASIBLE

}
 
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
/* On-line guarantee: accepts the level if its reserved bandwidth lev->U
   fits in *freebandwidth, subtracting it on success.  Returns 1 when the
   reservation fits, 0 otherwise. */
static int ELASTIC_public_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);

  if (*freebandwidth < lev->U)
    return 0;

  *freebandwidth -= (unsigned int)lev->U;
  return 1;
}
 
 
/* Task creation: accepts only ELASTIC_PCLASS models with C != 0 and
   0 < Tmin <= Tmax.  Computes the utilization bounds Umin/Umax from the
   scaled computation time and tentatively compresses the task set to
   check feasibility.  Returns 0 on success, -1 on refusal. */
static int ELASTIC_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
  ELASTIC_TASK_MODEL *elastic = (ELASTIC_TASK_MODEL *)m;
  ELASTIC_task_descr *et = &lev->elist[p];
  unsigned int temp;

  /* model sanity checks */
  if (m->pclass != ELASTIC_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;

  if (elastic->C == 0) return -1;
  if (elastic->Tmin > elastic->Tmax) return -1;
  if (elastic->Tmax == 0) return -1;
  if (elastic->Tmin == 0) return -1;

  NULL_TIMESPEC(&(et->dline));
  et->Tmin = elastic->Tmin;
  et->Tmax = elastic->Tmax;
  et->C = elastic->C;
  et->E = elastic->E;
  et->beta = elastic->beta;

  /* utilization bounds with the scaled computation time */
  mul32div32to32(elastic->C,lev->c_scaling_factor,SCALING_UNIT,temp);
  et->Umax = ((long long)MAX_BANDWIDTH * (long long)temp) / (long long)elastic->Tmin;
  et->Umin = ((long long)MAX_BANDWIDTH * (long long)temp) / (long long)elastic->Tmax;

  et->U = et->Umax;
  et->oldU = 0;
  et->T = et->Tmin;
  et->dltimer = -1;   /* no activation timer pending yet */

  /* tentatively add the task and check feasibility */
  et->flags |= ELASTIC_PRESENT;
  if (ELASTIC_compress(lev) == -1) {
    et->flags &= ~ELASTIC_PRESENT;
#ifdef ELASTIC_DEBUG
    cprintf("ELASTIC_public_create: compression failed!\n");
#endif
    return -1;
  }

  /* initial budget and wcet: scaled computation time */
  mul32div32to32(et->C,lev->c_scaling_factor,SCALING_UNIT,proc_table[p].avail_time);
  mul32div32to32(et->C,lev->c_scaling_factor,SCALING_UNIT,proc_table[p].wcet);

  proc_table[p].control |= CONTROL_CAP;  /* enable budget enforcement */

  return 0;
}
 
 
/* Detach hook: the elastic level keeps no state to release here. */
static void ELASTIC_public_detach(LEVEL l, PID p)
{
}
 
/* Eligibility test: an elastic job, once inserted, is always eligible. */
static int ELASTIC_public_eligible(LEVEL l, PID p)
{
  return 0;
}
 
/* Dispatch: simply forwarded to the master (scheduling) level. */
static void ELASTIC_public_dispatch(LEVEL l, PID p, int nostop)
{
  ELASTIC_level_des *ldes = (ELASTIC_level_des *)(level_table[l]);
  LEVEL master = ldes->scheduling_level;

  level_table[master]->private_dispatch(master, p, nostop);
}
 
/* Epilogue: raises XWCET_VIOLATION when the budget is exhausted, then
   forwards the epilogue to the master level. */
static void ELASTIC_public_epilogue(LEVEL l, PID p)
{
  ELASTIC_level_des *ldes = (ELASTIC_level_des *)(level_table[l]);
  LEVEL master = ldes->scheduling_level;

  /* check if the wcet is finished... */
  if (proc_table[p].avail_time <= 0) {
    TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,
                    (unsigned short int)proc_table[p].context,0);
    kern_raise(XWCET_VIOLATION,p);
  }

  level_table[master]->private_epilogue(master, p);
}
 
/* First activation of task p at time *t.  Only a SLEEPing task is
   activated; periodic behaviour is then sustained by the deadline timer
   (ELASTIC_timer_act), which re-posts itself every period. */
static void ELASTIC_public_activate(LEVEL l, PID p, struct timespec *t)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
  ELASTIC_task_descr *et = &lev->elist[p];

  /* check if we are not in the SLEEP state */
  if (proc_table[p].status != SLEEP) {
    return;
  }

  ELASTIC_activation(lev,p,t);

  /* Next activation: timer at the deadline set by ELASTIC_activation */
  et->dltimer = kern_event_post(&et->dline, ELASTIC_timer_act, (void *)(p));

}
 
/* Unblock: re-activates the task using the current time as the new
   activation time. */
static void ELASTIC_public_unblock(LEVEL l, PID p)
{
  ELASTIC_level_des *ldes = (ELASTIC_level_des *)(level_table[l]);
  struct timespec now;

  kern_gettime(&now);
  ELASTIC_activation(ldes, p, &now);
}
 
/* Block: removes the current job from the master level and clears its
   presence flag. */
static void ELASTIC_public_block(LEVEL l, PID p)
{
  ELASTIC_level_des *ldes = (ELASTIC_level_des *)(level_table[l]);
  ELASTIC_task_descr *td = &ldes->elist[p];
  LEVEL master = ldes->scheduling_level;

  level_table[master]->private_extract(master, p);
  td->flags &= ~ELASTIC_JOB_PRESENT;
}
 
/* Handles messages sent to an elastic task:
   - m == NULL: end of the current job; the job leaves the master level
     and the task idles until the next timer-driven activation;
   - m == 1:    task disable; the pending activation timer and the job
     (if any) are removed and the task goes back to SLEEP. */
static int ELASTIC_public_message(LEVEL l, PID p, void *m)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
  ELASTIC_task_descr *et = &lev->elist[p];

  switch((long)(m)) {

    case (long)(NULL):

      /* job end: remove from the master level, wait next activation */
      level_table[lev->scheduling_level]->
        private_extract(lev->scheduling_level,p);
      et->flags &= ~ELASTIC_JOB_PRESENT;

      proc_table[p].status = ELASTIC_IDLE;

      jet_update_endcycle(); /* Update the Jet data... */
      TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);

      break;

    case 1:

      /* task disable: cancel the pending activation timer */
      if (et->dltimer != -1) {
        kern_event_delete(et->dltimer);
        /* BUGFIX: invalidate the handle so the already-deleted event
           cannot be deleted a second time (e.g. by ELASTIC_public_end
           on a sleeping task) */
        et->dltimer = -1;
      }

      if (et->flags & ELASTIC_JOB_PRESENT) {
        level_table[ lev->scheduling_level ]->
          private_extract(lev->scheduling_level,p);
        et->flags &= ~ELASTIC_JOB_PRESENT;
      }

      proc_table[p].status = SLEEP;

      TRACER_LOGEVENT(FTrace_EVT_task_disable,(unsigned short int)proc_table[p].context,(unsigned int)l);

      break;

  }

  return 0;

}
 
/* Task termination: cancels the activation timer, removes the job from
   the master level, drops the task from the elastic set and recompresses
   so the remaining tasks can reclaim the freed bandwidth. */
static void ELASTIC_public_end(LEVEL l, PID p)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
  ELASTIC_task_descr *et = &lev->elist[p];

  if (et->dltimer != -1) {
    kern_event_delete(et->dltimer);
    /* BUGFIX: invalidate the handle after deleting the event, so a
       stale value can never be passed to kern_event_delete() again */
    et->dltimer = -1;
  }

  if (et->flags & ELASTIC_JOB_PRESENT) {
    level_table[ lev->scheduling_level ]->
      private_extract(lev->scheduling_level,p);
    et->flags &= ~ELASTIC_JOB_PRESENT;
  }

  et->flags &= ~ELASTIC_PRESENT;

  ELASTIC_compress(lev);  // Tasks may want to expand
}
 
/*+ Registration function +*/
/*+ Registration function: allocates and initializes the elastic level.
    flags:  ELASTIC_ENABLE_GUARANTEE installs the on-line guarantee;
    master: the scheduling level jobs are forwarded to;
    U:      the bandwidth reserved for elastic tasks.
    Returns the index of the new level. +*/
LEVEL ELASTIC_register_level(int flags, LEVEL master, ext_bandwidth_t U)
{
  LEVEL l;            /* the level that we register */
  ELASTIC_level_des *lev;  /* for readableness only */
  PID i;

  printk("ELASTIC_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor(sizeof(ELASTIC_level_des));

  lev = (ELASTIC_level_des *)level_table[l];

  /* fill the standard descriptor */
  if (flags & ELASTIC_ENABLE_GUARANTEE)
    lev->l.public_guarantee = ELASTIC_public_guarantee;
  else
    lev->l.public_guarantee = NULL;
  lev->l.public_create    = ELASTIC_public_create;
  lev->l.public_detach    = ELASTIC_public_detach;
  lev->l.public_end       = ELASTIC_public_end;
  lev->l.public_eligible  = ELASTIC_public_eligible;
  lev->l.public_dispatch  = ELASTIC_public_dispatch;
  lev->l.public_epilogue  = ELASTIC_public_epilogue;
  lev->l.public_activate  = ELASTIC_public_activate;
  lev->l.public_unblock   = ELASTIC_public_unblock;
  lev->l.public_block     = ELASTIC_public_block;
  lev->l.public_message   = ELASTIC_public_message;

  /* fill the ELASTIC task descriptor part */
  for (i=0; i<MAX_PROC; i++) {
    NULL_TIMESPEC(&(lev->elist[i].dline));
    lev->elist[i].Tmin = 0;
    lev->elist[i].Tmax = 0;
    lev->elist[i].T = 0;
    lev->elist[i].U = 0;
    lev->elist[i].C = 0;
    lev->elist[i].E = 0;
    lev->elist[i].beta = 0;
    lev->elist[i].flags = 0;
  }

  lev->c_scaling_factor = SCALING_UNIT;  /* identity scaling by default */

  lev->U = U;

  lev->scheduling_level = master;

  lev->current_level = l;

  lev->flags = 0;

  return l;
}
 
 
 
/* Force the period of task p to a given value (between Tmin and Tmax) */
 
/* Forces the period of task p to a given value in [Tmin,Tmax]: sets E=0
   temporarily so compression cannot change the chosen period, then
   restores E.  Returns 0 on success, -1 on failure (value out of range,
   task not elastic, or infeasible task set — state restored). */
int ELASTIC_set_period(PID p, TIME period) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev;
  ELASTIC_task_descr *et;
  int saveE;
  unsigned int temp;
  ext_bandwidth_t saveU;
  TIME saveT;

  lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  et = &lev->elist[p];

  f = kern_fsave();

  /* BUGFIX: every other setter checks ELASTIC_PRESENT; without this
     check the function operates on an empty descriptor */
  if (!(et->flags & ELASTIC_PRESENT)) {
    kern_frestore(f);
    return -1;
  }

  if (period < et->Tmin || period > et->Tmax) {
    kern_frestore(f);
    return -1;
  }

  saveE = et->E;
  saveU = et->U;
  saveT = et->T;

  et->E = 0;  /* set elasticity to zero to force period */
  mul32div32to32(et->C,lev->c_scaling_factor,SCALING_UNIT,temp);
  et->U = ((long long)MAX_BANDWIDTH * (long long)(temp))/((long long)period);
  et->T = period;
  if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
    cprintf("ELASTIC_set_period failed!\n");
#endif
    et->E = saveE;
    et->U = saveU;
    et->T = saveT;
    kern_frestore(f);
    return -1;
  }

  et->E = saveE;  /* Restore E when compression is done */
  kern_frestore(f);
  return 0;
}
 
/* Returns the current period of task p, or -1 if p is not elastic. */
int ELASTIC_get_period(PID p) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev;
  ELASTIC_task_descr *et;
  int result;

  lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  et = &lev->elist[p];

  f = kern_fsave();
  result = (et->flags & ELASTIC_PRESENT) ? (int)et->T : -1;
  kern_frestore(f);

  return result;
}
 
 
/* Changes the minimum (nominal) period of task p and recompresses the
   task set.  Returns 0 on success, -1 on failure (task not elastic,
   inconsistent range, or infeasible task set — state restored). */
int ELASTIC_set_Tmin(PID p, TIME Tmin)
{
  SYS_FLAGS f;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  ELASTIC_task_descr *et = &lev->elist[p];
  TIME saveTmin;
  TIME saveT;
  ext_bandwidth_t saveU;
  ext_bandwidth_t saveUmax;
  unsigned int temp;

  f = kern_fsave();
  if (et->flags & ELASTIC_PRESENT) {

    /* BUGFIX: reject an inconsistent range (creation enforces
       0 < Tmin <= Tmax) */
    if (Tmin == 0 || Tmin > et->Tmax) {
      kern_frestore(f);
      return -1;
    }

    saveTmin = et->Tmin;
    saveT = et->T;
    saveU = et->U;
    saveUmax = et->Umax;

    et->Tmin = Tmin;
    mul32div32to32(et->C,lev->c_scaling_factor,SCALING_UNIT,temp);
    /* BUGFIX: keep Umax (= C/Tmin) consistent with the new Tmin;
       ELASTIC_compress() works on Umin/Umax, not on the periods */
    et->Umax = ((long long)MAX_BANDWIDTH * (long long)(temp))/((long long)Tmin);
    if (Tmin > et->T) {
      /* the current period must never be below Tmin */
      et->T = Tmin;
      et->U = ((long long)MAX_BANDWIDTH * (long long)(temp))/((long long)Tmin);
    }

    if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
      cprintf("ELASTIC_set_Tmin failed: could not compress\n");
#endif
      et->Tmin = saveTmin;
      et->T = saveT;
      et->U = saveU;
      et->Umax = saveUmax;
      kern_frestore(f);
      return -1;
    }
    kern_frestore(f);
    return 0;
  } else {

    kern_frestore(f);
    return -1;
  }
}
 
 
/* Returns the minimum period Tmin of task p, or -1 if p is not elastic. */
int ELASTIC_get_Tmin(PID p) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev;
  ELASTIC_task_descr *et;
  int result;

  lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  et = &lev->elist[p];

  f = kern_fsave();
  result = (et->flags & ELASTIC_PRESENT) ? (int)et->Tmin : -1;
  kern_frestore(f);

  return result;
}
 
 
/* Changes the maximum tolerable period of task p and recompresses the
   task set.  Returns 0 on success, -1 on failure (task not elastic,
   inconsistent range, or infeasible task set — state restored). */
int ELASTIC_set_Tmax(PID p, TIME Tmax)
{
  SYS_FLAGS f;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  ELASTIC_task_descr *et = &lev->elist[p];
  TIME saveTmax;
  TIME saveT;
  ext_bandwidth_t saveU;
  ext_bandwidth_t saveUmin;
  unsigned int temp;

  f = kern_fsave();
  if (et->flags & ELASTIC_PRESENT) {

    /* BUGFIX: reject an inconsistent range (creation enforces
       0 < Tmin <= Tmax) */
    if (Tmax == 0 || Tmax < et->Tmin) {
      kern_frestore(f);
      return -1;
    }

    saveTmax = et->Tmax;
    saveT = et->T;
    saveU = et->U;
    saveUmin = et->Umin;

    et->Tmax = Tmax;
    mul32div32to32(et->C,lev->c_scaling_factor,SCALING_UNIT,temp);
    /* BUGFIX: keep Umin (= C/Tmax) consistent with the new Tmax;
       ELASTIC_compress() works on Umin/Umax, not on the periods */
    et->Umin = ((long long)MAX_BANDWIDTH * (long long)(temp))/((long long)Tmax);
    if (Tmax < et->T) {
      /* the current period must never exceed Tmax */
      et->T = Tmax;
      et->U = ((long long)MAX_BANDWIDTH * (long long)(temp))/((long long)Tmax);
    }

    if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
      cprintf("ELASTIC_set_Tmax failed: could not compress\n");
#endif
      et->Tmax = saveTmax;
      et->T = saveT;
      et->U = saveU;
      et->Umin = saveUmin;
      kern_frestore(f);
      return -1;
    }
    kern_frestore(f);
    return 0;
  } else {

    kern_frestore(f);
    return -1;
  }
}
 
 
/* Returns the maximum period Tmax of task p, or -1 if p is not elastic. */
int ELASTIC_get_Tmax(PID p) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev;
  ELASTIC_task_descr *et;
  int result;

  lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  et = &lev->elist[p];

  f = kern_fsave();
  result = (et->flags & ELASTIC_PRESENT) ? (int)et->Tmax : -1;
  kern_frestore(f);

  return result;
}
 
/* Changes the declared computation time of task p and recompresses the
   task set.  Returns 0 on success, -1 on failure (task not elastic,
   C == 0, or infeasible task set — state restored). */
int ELASTIC_set_C(PID p, TIME C)
{
  SYS_FLAGS f;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  ELASTIC_task_descr *et = &lev->elist[p];
  TIME saveC;
  ext_bandwidth_t saveU;
  ext_bandwidth_t saveUmax;
  ext_bandwidth_t saveUmin;
  unsigned int temp;

  f = kern_fsave();
  if (et->flags & ELASTIC_PRESENT) {

    /* BUGFIX: a null computation time is rejected at creation time too */
    if (C == 0) {
      kern_frestore(f);
      return -1;
    }

    saveC = et->C;
    saveU = et->U;
    saveUmax = et->Umax;
    saveUmin = et->Umin;
    et->C = C;

    mul32div32to32(et->C,lev->c_scaling_factor,SCALING_UNIT,temp);
    et->U = ((long long)MAX_BANDWIDTH * (long long)(temp))/((long long)et->T);
    /* BUGFIX: Umax (= C/Tmin) and Umin (= C/Tmax) depend on C and are
       the values ELASTIC_compress() actually uses: recompute them too */
    et->Umax = ((long long)MAX_BANDWIDTH * (long long)(temp))/((long long)et->Tmin);
    et->Umin = ((long long)MAX_BANDWIDTH * (long long)(temp))/((long long)et->Tmax);

    if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
      cprintf("ELASTIC_set_C failed: could not compress\n");
#endif
      et->C = saveC;
      et->U = saveU;
      et->Umax = saveUmax;
      et->Umin = saveUmin;
      kern_frestore(f);
      return -1;
    }
    kern_frestore(f);
    return 0;
  } else {

    kern_frestore(f);
    return -1;
  }
}
 
 
/* Returns the declared computation time C of task p, or -1 if p is not
   an elastic task. */
int ELASTIC_get_C(PID p) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev;
  ELASTIC_task_descr *et;
  int result;

  lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  et = &lev->elist[p];

  f = kern_fsave();
  result = (et->flags & ELASTIC_PRESENT) ? (int)et->C : -1;
  kern_frestore(f);

  return result;
}
 
 
/* Sets the elasticity coefficient of task p and recompresses the task
   set; the old value is restored on failure.  Returns 0 on success,
   -1 on failure. */
int ELASTIC_set_E(PID p, int E)
{
  SYS_FLAGS f;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  ELASTIC_task_descr *et = &lev->elist[p];
  int previousE;

  f = kern_fsave();

  if (!(et->flags & ELASTIC_PRESENT)) {
    kern_frestore(f);
    return -1;
  }

  previousE = et->E;
  et->E = E;

  if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
    cprintf("ELASTIC_set_E failed: could not compress\n");
#endif
    et->E = previousE;
    kern_frestore(f);
    return -1;
  }

  kern_frestore(f);
  return 0;
}
 
/* Returns the elasticity coefficient E of task p, or -1 if p is not an
   elastic task. */
int ELASTIC_get_E(PID p) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev;
  ELASTIC_task_descr *et;
  int result;

  lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  et = &lev->elist[p];

  f = kern_fsave();
  result = (et->flags & ELASTIC_PRESENT) ? et->E : -1;
  kern_frestore(f);

  return result;
}
 
/* Sets the scaling policy flag (beta: PERIOD_SCALING or WCET_SCALING)
   of task p and recompresses the task set; the old value is restored on
   failure.  Returns 0 on success, -1 on failure. */
int ELASTIC_set_beta(PID p, int beta) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  ELASTIC_task_descr *et = &lev->elist[p];
  int previous;

  f = kern_fsave();

  if (!(et->flags & ELASTIC_PRESENT)) {
    kern_frestore(f);
    return -1;
  }

  previous = et->beta;
  et->beta = beta;

  if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
    cprintf("ELASTIC_set_beta failed: could not compress\n");
#endif
    et->beta = previous;
    kern_frestore(f);
    return -1;
  }

  kern_frestore(f);
  return 0;
}
 
/* Returns the scaling policy flag beta of task p, or -1 if p is not an
   elastic task. */
int ELASTIC_get_beta(PID p) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  ELASTIC_task_descr *et = &lev->elist[p];
  int result;

  f = kern_fsave();
  result = (et->flags & ELASTIC_PRESENT) ? et->beta : -1;
  kern_frestore(f);

  return result;
}
 
/* Changes the total bandwidth reserved for the elastic level and
   recompresses the task set.  Returns 0 on success, -1 on failure
   (the previous bandwidth is restored). */
int ELASTIC_set_bandwidth(LEVEL level, ext_bandwidth_t U) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[level];
  ext_bandwidth_t saveU;

  f = kern_fsave();
  saveU = lev->U;
  lev->U = U;

  if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
    cprintf("ELASTIC_set_bandwidth failed: could not compress\n");
#endif
    /* BUGFIX: restore the previous bandwidth on failure; every other
       setter restores the old state, and leaving the new U in place
       keeps a configuration that was just proven infeasible */
    lev->U = saveU;
    kern_frestore(f);
    return -1;
  }

  kern_frestore(f);
  return 0;

}
 
/* Returns the total bandwidth currently reserved for the elastic level. */
ext_bandwidth_t ELASTIC_get_bandwidth(LEVEL level) {
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[level];
  return lev->U;
}
 
/* Changes the computation-time scaling factor of the level (fixed point,
   SCALING_UNIT = identity) and recompresses the task set.  Returns 0 on
   success, -1 on failure (the previous factor is restored).
   NOTE(review): the per-task Umin/Umax were computed with the scaling
   factor active at creation time and are not recomputed here — verify. */
int ELASTIC_set_scaling_factor(LEVEL level, int scaling_factor) {
  SYS_FLAGS f;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[level];
  int save_factor;

  f = kern_fsave();
  save_factor = lev->c_scaling_factor;
  lev->c_scaling_factor = scaling_factor;
  if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
    cprintf("ELASTIC_set_scaling_factor failed: could not compress\n");
#endif
    /* BUGFIX: restore the old factor, otherwise the level is left with
       a configuration that was just proven infeasible */
    lev->c_scaling_factor = save_factor;
    kern_frestore(f);
    return -1;
  }
  kern_frestore(f);
  return 0;
}
 
/* Returns the computation-time scaling factor of the level. */
int ELASTIC_get_scaling_factor(LEVEL level) {
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[level];
  return lev->c_scaling_factor;
}
/shark/trunk/modules/elastic/subdir.mk
0,0 → 1,0
OBJS += elastic/elastic.o
/shark/trunk/modules/intdrive/intdrive/intdrive.h
0,0 → 1,61
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* (see the web pages for full authors list)
* Giacomo Guidi <giacomo@gandalf.sssup.it>
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
/* Public interface of the IntDrive module: a budget-based server for the
   single interrupt-driver task. */
#ifndef __INTDRIVE_H__
#define __INTDRIVE_H__

#include <ll/ll.h>
#include <kernel/config.h>
#include <sys/types.h>
#include <kernel/types.h>
#include "ll/sys/cdefs.h"

__BEGIN_DECLS

/*+ init flag: enable the wcet check on the IntDrive task +*/
#define INTDRIVE_CHECK_WCET 1

/*+ Registration: server budget, replenishment period, init flags +*/
LEVEL INTDRIVE_register_level(TIME capacity, TIME replenish_period, int flag);

/*+ Returns the used bandwidth of a level +*/
bandwidth_t INTDRIVE_usedbandwidth(LEVEL l);

/*+ Set the Q-theta value for the server - Return the accepted value +*/
TIME INTDRIVE_set_q_theta(LEVEL l, TIME new_q_theta);

__END_DECLS
#endif
/shark/trunk/modules/intdrive/intdrive.c
0,0 → 1,394
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Giacomo Guidi <giacomo@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/*
* Copyright (C) 2000,2002 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
/* Interrupt Driver Module */
 
#include <intdrive/intdrive/intdrive.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <tracer.h>
 
#include <ll/i386/64bit.h>
 
/*+ Status used in the level +*/
#define INTDRIVE_READY MODULE_STATUS_BASE /*+ - Ready status +*/
#define INTDRIVE_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/
#define INTDRIVE_IDLE MODULE_STATUS_BASE+3 /*+ to wait the replenish +*/
#define INTDRIVE_WAIT MODULE_STATUS_BASE+4 /*+ to wait the activation */
 
//#define INTDRIVE_DEBUG
 
/*+ the level redefinition for the IntDrive +*/
typedef struct {
  level_des l;     /*+ the standard level descriptor      +*/

  TIME replenish_period;   /* replenishment period (passed at registration
                              — currently the replenish delay is computed
                              from the bandwidth instead, see activate) */
  TIME capacity;           /* budget value passed at registration */
  TIME q_theta;            /* budget refill level: avail is reset to this
                              value by INTDRIVE_timer() */

  struct timespec act_time;  /* time of the last dispatch */

  int avail;               /* remaining budget (may go negative) */
  int replenish_timer;     /* replenish event handle, NIL if none pending */

  //struct timespec replenish_expires;
  //int wcet_timer;

  int act_number;  /*+ the number of pending activations +*/

  int flags;       /*+ the init flags... +*/

  bandwidth_t U;   /*+ the used bandwidth +*/

} INTDRIVE_level_des;
 
/* The single task handled by the IntDrive module (NIL until created). */
PID INTDRIVE_task = NIL;
 
/* Replenish the capacity: replenishment event handler.  Refills the
   budget (avail = q_theta) and, if activations are pending, makes the
   IntDrive task ready again and requests a reschedule; otherwise the
   task goes back to waiting for the next activation. */
static void INTDRIVE_timer(void *arg)
{
  INTDRIVE_level_des *lev = (INTDRIVE_level_des *)(arg);

  lev->replenish_timer = NIL;  /* no replenishment pending any more */

#ifdef INTDRIVE_DEBUG
  kern_printf("(INTD:TIMER)");
#endif

  if (INTDRIVE_task == NIL) return;

  /* budget back to full */
  lev->avail = lev->q_theta;
  TRACER_LOGEVENT(FTrace_EVT_user_event_0, 0, lev->avail + INT_MAX);

  switch (proc_table[INTDRIVE_task].status) {

    case INTDRIVE_IDLE:
      if (lev->act_number) {
        /* pending activations: run again and force a reschedule */
        proc_table[INTDRIVE_task].status = INTDRIVE_READY;
        event_need_reschedule();
      } else {
        proc_table[INTDRIVE_task].status = INTDRIVE_WAIT;
      }
      break;
  }
}
 
/*static void INTDRIVE_wcet_timer(void *arg)
{
 
INTDRIVE_level_des *lev = (INTDRIVE_level_des *)(arg);
 
lev->wcet_timer = NIL;
kern_raise(XWCET_VIOLATION,INTDRIVE_task);
 
}*/
 
/* Scheduler hook: returns the IntDrive task when it exists and is ready
   or already executing, NIL otherwise. */
static PID INTDRIVE_public_scheduler(LEVEL l)
{
  if (INTDRIVE_task == NIL)
    return NIL;

  if (proc_table[INTDRIVE_task].status != INTDRIVE_READY &&
      proc_table[INTDRIVE_task].status != EXE)
    return NIL;

  return INTDRIVE_task;
}
 
/* Creates the (single) IntDrive task from a HARD_TASK_MODEL; only one
   such task may exist in the whole system.  Returns 0 on acceptance,
   -1 on refusal. */
static int INTDRIVE_public_create(LEVEL l, PID p, TASK_MODEL *m)
{

  HARD_TASK_MODEL *h;

  if (m->pclass != HARD_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;
  h = (HARD_TASK_MODEL *)m;
  /* NOTE(review): this refuses the model only when BOTH wcet == 0 AND
     periodicity != INTDRIVE; presumably both conditions should be
     required individually — verify against the module's users */
  if (!h->wcet && h->periodicity != INTDRIVE) return -1;

  /* only one IntDrive task is allowed */
  if (INTDRIVE_task != NIL) return -1;

  INTDRIVE_task = p;

  proc_table[INTDRIVE_task].wcet = h->wcet;
  proc_table[INTDRIVE_task].avail_time = h->wcet;
  proc_table[INTDRIVE_task].status = INTDRIVE_WAIT;
  proc_table[INTDRIVE_task].control &= ~CONTROL_CAP;
  return 0;

}
 
/* Dispatch: records the dispatch time in act_time; the epilogue uses it
   to charge the executed interval to the budget. */
static void INTDRIVE_public_dispatch(LEVEL l, PID p, int nostop)
{

  INTDRIVE_level_des *lev = (INTDRIVE_level_des *)(level_table[l]);
  //struct timespec time;

  kern_gettime(&(lev->act_time));
  /*TIMESPEC_ASSIGN(&time,&(lev->act_time));
  ADDUSEC2TIMESPEC(proc_table[INTDRIVE_task].wcet,&time);

  if (lev->flags == INTDRIVE_CHECK_WCET)
    lev->wcet_timer = kern_event_post(&time,INTDRIVE_wcet_timer,(void *)lev);*/
}
 
/* Epilogue: charges the time executed since the last dispatch to the
   budget and raises XWCET_VIOLATION if the slot exceeded the task wcet. */
static void INTDRIVE_public_epilogue(LEVEL l, PID p)
{

  struct timespec time;

  INTDRIVE_level_des *lev = (INTDRIVE_level_des *)(level_table[l]);

  /*if (lev->wcet_timer != NIL)
    kern_event_delete(lev->wcet_timer);*/
  /* executed interval = schedule_time - act_time
     (NOTE(review): assumes SUBTIMESPEC(a,b,c) computes c = a - b) */
  SUBTIMESPEC(&schedule_time, &(lev->act_time), &time);
  lev->avail -= TIMESPEC2USEC(&time);
  TRACER_LOGEVENT(FTrace_EVT_user_event_0, 0, lev->avail + INT_MAX);
  if (proc_table[INTDRIVE_task].wcet < TIMESPEC2USEC(&time)) {
    kern_raise(XWCET_VIOLATION,INTDRIVE_task);
  }
}
 
/* Activation of the IntDrive task (typically one per serviced interrupt).
   Activations are counted in act_number; if no replenishment is pending,
   one is posted after a delay proportional to the budget to refill. */
static void INTDRIVE_public_activate(LEVEL l, PID p, struct timespec *t)
{
  struct timespec acttime;
  TIME time, delta_capacity;

  INTDRIVE_level_des *lev = (INTDRIVE_level_des *)(level_table[l]);

  if (proc_table[INTDRIVE_task].status == INTDRIVE_WAIT) {

    /* the task was waiting: make it ready and count the activation */
    proc_table[INTDRIVE_task].status = INTDRIVE_READY;

    lev->act_number++;

  } else {

    if (proc_table[INTDRIVE_task].status == INTDRIVE_IDLE ||
        proc_table[INTDRIVE_task].status == INTDRIVE_READY ||
        proc_table[INTDRIVE_task].status == EXE) {

#ifdef INTDRIVE_DEBUG
      kern_printf("(INTD:WAIT_REC)");
#endif
      /* task already idle/ready/executing: just record the activation */
      lev->act_number++;

    }

  }

  if (lev->replenish_timer == NIL) {

    /* delay until replenishment: time needed to refill delta_capacity
       at bandwidth U, i.e. delta_capacity * MAX_BANDWIDTH / U */
    delta_capacity = lev->q_theta - lev->avail;
    mul32div32to32(delta_capacity, MAX_BANDWIDTH, lev->U, time);
    kern_gettime(&acttime);
    ADDUSEC2TIMESPEC(time,&acttime);
    lev->replenish_timer = kern_event_post(&acttime,INTDRIVE_timer,(void *)lev);
    /*kern_gettime(&(lev->replenish_expires));
    ADDUSEC2TIMESPEC(lev->replenish_period,&(lev->replenish_expires));
    lev->replenish_timer = kern_event_post(&(lev->replenish_expires),INTDRIVE_timer,(void *)lev);*/
  }
}
 
/* Unblock hook: the interrupt-driver task becomes schedulable again. */
static void INTDRIVE_public_unblock(LEVEL l, PID p)
{
  PID drv = INTDRIVE_task;

  proc_table[drv].status = INTDRIVE_READY;
}
 
/* Block hook: intentionally empty — blocking of the INTDRIVE task needs
   no bookkeeping at this level. */
static void INTDRIVE_public_block(LEVEL l, PID p)
{

}
 
/* End-of-cycle message hook: charge the just-finished run to the budget,
   then decide the next task state — IDLE (budget exhausted, wait for the
   replenishment it posts here), READY (more activations pending) or WAIT
   (nothing pending). Always returns 0. */
static int INTDRIVE_public_message(LEVEL l, PID p, void *m)
{
  INTDRIVE_level_des *lev = (INTDRIVE_level_des *)(level_table[l]);
  struct timespec time, acttime;
  //int delta_time;
  TIME delta_capacity, delta_time;
  lev->act_number--;

  /*if (lev->wcet_timer != NIL)
    kern_event_delete(lev->wcet_timer);*/

  /* elapsed execution time since dispatch (now - act_time) */
  kern_gettime(&acttime);
  SUBTIMESPEC(&acttime, &(lev->act_time), &time);
  delta_time = TIMESPEC2USEC(&time);
  /* NOTE(review): '(1-lev->U)' looks suspicious — with U a fixed-point
     bandwidth out of MAX_BANDWIDTH, '(MAX_BANDWIDTH - lev->U)' would be
     the usual complement; confirm intent and signedness of the macro
     arguments before relying on this accounting. */
  mul32div32to32(delta_time, (1-lev->U), MAX_BANDWIDTH, delta_capacity);
  lev->avail -= delta_capacity;
  //lev->avail -= TIMESPEC2USEC(&time);

  /* trace remaining budget, offset by INT_MAX to keep it non-negative */
  TRACER_LOGEVENT(FTrace_EVT_user_event_0, 0, lev->avail + INT_MAX);
#ifdef INTDRIVE_DEBUG
  kern_printf("(INTD:AV:%d)",(int)(lev->avail));
#endif

  /* budget exhausted: go IDLE and (re)post the replenishment event
     (assumes lev->avail is a signed type — TODO confirm) */
  if (lev->avail < 0) {
    proc_table[INTDRIVE_task].status = INTDRIVE_IDLE;
    if (lev->replenish_timer != NIL)
      kern_event_delete(lev->replenish_timer);

    /* time to refill (q_theta - avail) at bandwidth U */
    delta_capacity = lev->q_theta - lev->avail;
    mul32div32to32(delta_capacity, MAX_BANDWIDTH, lev->U, delta_time);
    kern_gettime(&acttime);
    ADDUSEC2TIMESPEC(delta_time,&acttime);
    lev->replenish_timer = kern_event_post(&acttime,INTDRIVE_timer,(void *)lev);
    /*temp = -lev->avail;
    mul32div32to32(temp,lev->replenish_period,lev->capacity,delta_time)
    ADDUSEC2TIMESPEC(delta_time,&(lev->replenish_expires));
    lev->replenish_timer = kern_event_post(&(lev->replenish_expires),INTDRIVE_timer,(void *)lev);*/

#ifdef INTDRIVE_DEBUG
    kern_printf("(INTD:IDLE:%d)",delta_time);
#endif

  } else {
    if (lev->act_number) {
      /* more activations queued: run again */
      proc_table[INTDRIVE_task].status = INTDRIVE_READY;

#ifdef INTDRIVE_DEBUG
      kern_printf("(INTD:NEXT_ACT)");
#endif

    } else {

#ifdef INTDRIVE_DEBUG
      kern_printf("(INTD:WAIT_ACT)");
#endif

      /* nothing pending: wait for the next activation */
      proc_table[INTDRIVE_task].status = INTDRIVE_WAIT;

    }
  }

  TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,
    (unsigned short int)proc_table[INTDRIVE_task].context,(unsigned int)l);

  return 0;
}
 
/* End hook: cancel any pending replenishment event and park the
   interrupt-driver task in the IDLE state.
   Fix: after kern_event_delete() the handle is reset to NIL; the original
   left a stale handle, so a later INTDRIVE_public_activate (which posts a
   new event only when replenish_timer == NIL) would never re-arm the
   replenishment. Every other site reassigns the handle right after the
   delete. */
static void INTDRIVE_public_end(LEVEL l, PID p)
{

  INTDRIVE_level_des *lev = (INTDRIVE_level_des *)(level_table[l]);

  if (lev->replenish_timer != NIL) {
    kern_event_delete(lev->replenish_timer);
    lev->replenish_timer = NIL;
  }

  proc_table[INTDRIVE_task].status = INTDRIVE_IDLE;

}
 
/* Registration functions */
 
/*+ Registration function:
    TIME capacity        server budget (also the initial q_theta)
    TIME replenish_period period used to derive the bandwidth U
    int flags            module options

    Allocates a level descriptor, installs the public hooks and derives
    the reserved bandwidth U = MAX_BANDWIDTH * capacity / replenish_period.
    Returns the level number at which the module has been registered. +*/
LEVEL INTDRIVE_register_level(TIME capacity, TIME replenish_period, int flags)
{
  LEVEL l;            /* the level that we register */
  INTDRIVE_level_des *lev;

  printk("INTDRIVE_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor(sizeof(INTDRIVE_level_des));

  lev = (INTDRIVE_level_des *)level_table[l];

  /* wire up the public interface of this scheduling module */
  lev->l.public_scheduler = INTDRIVE_public_scheduler;
  lev->l.public_guarantee = NULL;
  lev->l.public_create    = INTDRIVE_public_create;
  lev->l.public_end       = INTDRIVE_public_end;
  lev->l.public_dispatch  = INTDRIVE_public_dispatch;
  lev->l.public_epilogue  = INTDRIVE_public_epilogue;
  lev->l.public_activate  = INTDRIVE_public_activate;
  lev->l.public_unblock   = INTDRIVE_public_unblock;
  lev->l.public_block     = INTDRIVE_public_block;
  lev->l.public_message   = INTDRIVE_public_message;

  NULL_TIMESPEC(&(lev->act_time));

  /* budget bookkeeping: empty budget, target refill level = capacity */
  lev->capacity = capacity;
  //lev->replenish_period = replenish_period;
  lev->replenish_timer = NIL;
  //lev->wcet_timer = NIL;
  lev->flags = flags;
  lev->act_number = 0;
  lev->avail = 0;
  lev->q_theta = capacity;
  /* U = MAX_BANDWIDTH * capacity / replenish_period (fixed point) */
  mul32div32to32(MAX_BANDWIDTH,lev->capacity,replenish_period,lev->U);

  return l;
}
 
/* Accessor: return the bandwidth U reserved by this INTDRIVE instance. */
bandwidth_t INTDRIVE_usedbandwidth(LEVEL l)
{
  INTDRIVE_level_des *level = (INTDRIVE_level_des *)(level_table[l]);

  return level->U;
}
 
/* Set the replenishment target q_theta, clamped to [0, capacity].
   Returns the value actually stored. */
TIME INTDRIVE_set_q_theta(LEVEL l, TIME new_q_theta)
{
  INTDRIVE_level_des *level = (INTDRIVE_level_des *)(level_table[l]);

  level->q_theta = new_q_theta;
  if (level->q_theta < 0)
    level->q_theta = 0;
  if (level->q_theta > level->capacity)
    level->q_theta = level->capacity;

  return level->q_theta;
}
/shark/trunk/modules/intdrive/subdir.mk
0,0 → 1,0
OBJS += intdrive/intdrive.o
/shark/trunk/modules/ss/ss/ss.h
0,0 → 1,169
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: ss.h,v 1.1 2005-02-25 10:55:09 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:55:09 $
------------
 
This file contains the aperiodic server SS (Sporadic Server)
 
Title:
SS (Sporadic Server)
 
Task Models Accepted:
SOFT_TASK_MODEL - Soft Tasks
wcet field is ignored
met field is ignored
period field is ignored
periodicity field can be only APERIODIC
arrivals field can be either SAVE or SKIP
 
Description:
This module schedule his tasks following the Sporadic Server scheme.
The scheme is slightly modified respect to classic SS. The server
becomes active at arrival time of a task must be served.
 
All the tasks are put in a FIFO (FCFS) queue and at a time only the first
task in the queue is put in the upper level.
 
The module remembers pending activations when calling task_sleep...
 
Exceptions raised:
XUNVALID_GUEST
This level doesn't support guests. When a guest operation
is called, the exception is raised.
XUNVALID_SS_REPLENISH
Indicates an anomalous replenishment situation:
. replenish time fires and no amounts are set
. replenish amount posted when server is not active
. no more space to post a replenish amount
 
Restrictions & special features:
- This level doesn't manage the main task.
- At init time we have to specify:
. The Capacity and the period used by the server
- The level don't use the priority field.
- if an aperiodic task calls a task_delay when owning a mutex implemented
with shadows, the delay may have no effect, so don't use delay when
using a mutex!!!
- On calling task_delay and task_sleep the replenish amount is posted.
This because replenish time may fires when task is sleeping (server
is not active).
- A function to return the used bandwidth of the level is provided.
- A function to return the avail server capacity is provided.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#ifndef __SS_H__
#define __SS_H__
 
#include <kernel/config.h>
#include <kernel/types.h>
#include <sys/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/*+ 1 - ln(2) +*/
#ifndef RM_MINFREEBANDWIDTH
#define RM_MINFREEBANDWIDTH 1317922825
#endif
 
/*+ Max size of replenish queue +*/
#define SS_MAX_REPLENISH MAX_EVENT
 
/*+ flags... +*/
#define SS_DISABLE_ALL 0
#define SS_ENABLE_BACKGROUND 1 /*+ Background scheduling enabled +*/
#define SS_ENABLE_GUARANTEE_EDF 2 /*+ Task Guarantee enabled +*/
#define SS_ENABLE_ALL_EDF 3 /*+ guarantee+background enabled +*/
 
#define SS_ENABLE_GUARANTEE_RM 4 /*+ Task Guarantee enabled +*/
#define SS_ENABLE_ALL_RM 5 /*+ guarantee+background enabled +*/
 
/*+ internal flags +*/
#define SS_BACKGROUND 8 /*+ this flag is set when scheduling
in background +*/
#define SS_BACKGROUND_BLOCK 16 /*+ this flag is set when we want to
blocks the background scheduling +*/
 
/*+ internal switches +*/
typedef enum {
SS_SERVER_NOTACTIVE, /*+ SS is not active +*/
SS_SERVER_ACTIVE /*+ SS is active +*/
} ss_status;
 
/*+ internal functions +*/
 
/*+ Used to store replenishment events +*/
/* Now we use a statically allocated array. In this way, no more than
SS_MAX_REPLENISH replenishments can be posted.
typedef struct {
struct timespec rtime;
int ramount;
replenishq *next;
} replenishq;
*/
 
/*+ Registration function:
bandwidth_t b Max bandwidth used by the SS
int flags Options to be used in this level instance...
LEVEL master The level that must be used as master level
int Cs Server capacity
int per Server period
 
returns the level number at which the module has been registered.
+*/
LEVEL SS_register_level(int flags, LEVEL master, int Cs, int per);
 
/*+ Returns the used bandwidth of a level +*/
bandwidth_t SS_usedbandwidth(LEVEL l);
 
/*+ Returns the available capacity +*/
int SS_availCs(LEVEL l);
 
__END_DECLS
#endif
/shark/trunk/modules/ss/ss/ssutils.h
0,0 → 1,110
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: ssutils.h,v 1.1 2005-02-25 10:55:09 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:55:09 $
------------
 
This file contains utility functions used into
aperiodic server SS (Sporadic Server)
 
Title:
SS (Sporadic Server) utilities
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#ifndef __SSUTILS_H__
#define __SSUTILS_H__
 
#include <kernel/config.h>
#include <sys/types.h>
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/* Max size of replenish queue */
#define SS_MAX_REPLENISH MAX_EVENT
 
/*+ Used to store replenishment events +*/
/* Now we use a statically allocated array. In this way, no more than
SS_MAX_REPLENISH replenishments can be posted.
typedef struct {
struct timespec rtime;
int ramount;
replenishq *next;
} replenishq;
*/
 
/*+ SS local memory allocator.
Can be used for performance optimization. +*/
void *ss_alloc(DWORD b);
 
/*+ Insert an element at tail of replenish queue
LEVEL l module level
int amount element to insert
 
RETURNS:
0 successful insertion
NIL no more space for insertion +*/
int ssq_inslast(LEVEL l, int amount);
 
/*+ Get first element from replenish queue
LEVEL l module level
 
RETURNS: extracted element
int ssq_getfirst(LEVEL l);
 
/*+ Enquire for empty queue
LEVEL l module level
 
RETURNS:
0 queue is not empty
1 queue is empty +*/
int ssq_isempty(LEVEL l);
 
__END_DECLS
#endif
/shark/trunk/modules/ss/ss.c
0,0 → 1,1011
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: ss.c,v 1.1 2005-02-25 10:55:09 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:55:09 $
------------
 
This file contains the aperiodic Sporadic Server (SS).
 
Note: in the following, server capacity and server budget are used as
synonyms.
 
When scheduling in background the flags field has the SS_BACKGROUND bit set
 
When scheduling a task because it is pointed by another task via shadows,
the task have to be extracted from the wait queue or the master level. To
check this we have to look at the activated field; it is != NIL if a task
is inserted into the master level. Only a task at a time can be inserted
into the master level.
 
The capacity of the server must be updated
- when scheduling a task normally
- when scheduling a task because it is pointed by a shadow
but not when scheduling in background.
 
When a task is extracted from the system no scheduling has to be done
until the task reenter into the system. To implement this, when a task
is extracted we block the background scheduling (the scheduling with the
master level is already blocked because the activated field is not
reset to NIL) using the SS_BACKGROUNDBLOCK bit.
 
nact[p] is -1 if the task set the activations to SKIP, >= 0 otherwise
 
In contrast to classic SS scheme, the activation happens when
a task does a create request while there is positive budget (instead to
becomes active when there is a running task with priority higger then or
equal to the server).
So the replenish time is estabished on task arrival time. Replenish time
is calculated as usual: activation time + server period.
When the server ends its budget, becomes not active until a replenishment
occurs.
 
When a task ends its computation and there are no tasks to schedule or,
again, the server budget ends, a replenish amount is posted so that, when
replenish time fires, the server budget will be updated. Replenish
amount is determined depending on how much time tasks have ran.
Replenish amount does't takes into account periods during witch tasks
handled by SS are preempted.
 
There are two models used to handle a task is running into a critic section
(owning a mutex): "nostop" model and "stop" model.
Using the "nostop" model, a task that runs into a critic section is not
stopped when server ends its budget. This is done so higger priority tasks
waiting for mutex are not blocked for so much time to replenish time occurs.
When this happens the server capacity becomes negative and the replenish
amount takes into account the negative budget part.
With "stop" model running task is always suspended when server budget ends.
If suspended task owns a mutex shared with higger priority task, the last
one cannot runs until the mutex will be released. Higger priority task
must waits at least upto next replenish time, when server budget will be
refulled and suspended task runs again.
 
Using "nostop" model, SS can uses more bandwidth respect to assigned
capacity (due to negative budgets). So, calculating the guarantee, the
longer critic section of all tasks handled by SS must be considered.
 
SS can be used either with EDF or RM master level.
 
Read SS.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <stdlib.h>
#include <ss/ss/ss.h>
#include <ll/stdio.h>
#include <ll/string.h>
 
#include <ll/sys/ll/event.h>
 
#include <kernel/const.h>
#include <kernel/model.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
/* For debugging purpose */
//#define DEBUG 1
 
/*+ Status used in the level +*/
#define SS_WAIT APER_STATUS_BASE /*+ waiting the service +*/
 
/*+ Some useful macros +*/
#define BACKGROUND_ON (lev->flags & SS_BACKGROUND)
 
extern struct event *firstevent;
 
/*+ the level redefinition for the Sporadic Server +*/
typedef struct {
  level_des l;                   /*+ the standard level descriptor +*/

  /* The wcet are stored in the task descriptor's priority
     field, so no other fields are needed */

  int nact[MAX_PROC];            /*+ number of pending activations;
                                     -1 means the task SKIPs arrivals +*/

  struct timespec lastdline;     /*+ the last deadline (replenish time)
                                     assigned to a SS task +*/

  int Cs;                        /*+ server capacity +*/
  int availCs;                   /*+ server avail time (may go negative
                                     with the "nostop" model) +*/
  int period;                    /*+ Server period +*/
  bandwidth_t U;                 /*+ the used bandwidth by the server +*/

  IQUEUE wait;                   /*+ the wait queue of the SS +*/
  PID activated;                 /*+ the task inserted in another queue +*/

  int flags;                     /*+ the init flags... +*/


  LEVEL scheduling_level;        /*+ master level tasks are inserted in +*/

  int replenishment[SS_MAX_REPLENISH]; /*+ contains replenish amounts +*/
  int rfirst,rlast;              /*+ first and last valid replenish
                                     in replenish queue (circular) +*/
  int rcount;                    /*+ queued replenishments +*/

  int replenish_amount;          /*+ partial replenishments before post +*/
  ss_status server_active;       /*+ Is server active? +*/

} SS_level_des;
 
/*+ function prototypes +*/
void SS_internal_status(LEVEL l);
static void SS_replenish_timer(void *arg);
/*-------------------------------------------------------------------*/
 
/*** Utility functions ***/
 
 
/* These are for dinamic queue. **Disabled** */
#if 0
/* These routines are not tested, be carefull */
 
/*+ SS local memory allocator.
Can be used for performance optimization.
The interface is the same of kern_alloc() +*/
void inline * ss_alloc(DWORD b) {
/* Now simply wraps to standard kernel alloc */
return kern_alloc(b);
}
 
void ssq_inslast(LEVEL l, replenishq *elem) {
 
SS_level_des *lev = (SS_level_des *) level_table[l];
 
if(lev->rqueue_last == NULL) { /* empty queue */
lev->rqueue_last=elem;
lev->rqueue_first=elem;
return;
}
elem->next = NULL;
lev->rqueue_last->next = elem;
lev->rqueue_last = elem;
}
 
replenishq *ssq_getfirst(LEVEL l) {
 
SS_level_des *lev = (SS_level_des *) level_table[l];
replenishq *tmp;
if(lev->rqueue_first == NULL) { /* empty queue */
return 0;
}
tmp = lev->rqueue_first;
lev->rqueue_first = tmp->next;
if(lev->rqueue_first == NULL) { /* no more elements */
lev->rqueue_last = NULL;
}
tmp->next = NULL; /* to remove dangling pointer */
return tmp;
}
#endif
 
/* For queue implemented with array.
SS_MAX_REPLENISH array size assumed */
 
/*+ Append a replenish amount at the tail of the circular queue.
    LEVEL l    module level
    int amount element to insert

    RETURNS:
      0   successful insertion
      NIL no more space for insertion +*/
static inline int ssq_inslast (LEVEL l, int amount) {

  SS_level_des *lev = (SS_level_des *)(level_table[l]);

#ifdef DEBUG
  kern_printf("insl ");
#endif

  if (lev->rcount >= SS_MAX_REPLENISH)
    return NIL;                         /* queue is full */

  lev->replenishment[lev->rlast] = amount;
  lev->rlast = (lev->rlast + 1) % SS_MAX_REPLENISH;
  lev->rcount++;

#ifdef DEBUG
  printf_xy(0,0,WHITE,"%d",lev->rcount);
#endif

  return 0;
}
 
/*+ Remove and return the head of the replenish queue.
    LEVEL l module level

    RETURNS:
      the extracted element
      NIL on empty queue +*/
static inline int ssq_getfirst (LEVEL l) {

  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  int amount;

#ifdef DEBUG
  kern_printf("getf ");
#endif

  if (!lev->rcount)
    return NIL;                         /* nothing queued */

  amount = lev->replenishment[lev->rfirst];
  lev->rfirst = (lev->rfirst + 1) % SS_MAX_REPLENISH;
  lev->rcount--;

#ifdef DEBUG
  printf_xy(0,0,WHITE,"%d",lev->rcount);
#endif

  return amount;
}
 
/*+ Enquire for empty replenish queue.
    LEVEL l module level

    RETURNS:
      1 queue is empty
      0 queue is not empty +*/
static inline int ssq_isempty (LEVEL l) {

  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  return lev->rcount == 0;
}
 
/*+ Post the accumulated replenish amount for budget consumed so far.
    Must be called while the server is still ACTIVE: the server is then
    deactivated and replenish_amount is queued (and reset). If the queue
    is full or the server was not active, XINVALID_SS_REPLENISH is raised.
    LEVEL l module level */
static inline void SS_set_ra(LEVEL l)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  /* replenish must be set when the server is still active */
  if(lev->server_active == SS_SERVER_ACTIVE) {
    lev->server_active = SS_SERVER_NOTACTIVE;
    if(ssq_inslast(l, lev->replenish_amount) == NIL) {
      kern_printf("SS: no more space to post replenishment\n");
      kern_printf("You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
      SS_internal_status(l);
      kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
#ifdef DEBUG
      exit(-1);
#endif
    }
    lev->replenish_amount = 0;
  }
  else {
    /* anomalous situation: posting a replenish amount while inactive */
    kern_printf("SS not active when posting R.A.\n");
    SS_internal_status(l);
    kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
#ifdef DEBUG
    exit(-1);
#endif
  }
}
/* ------------------------------------------------------------------ */
 
/* Insert the task pointed by lev->activated into the master level as a
   guest (JOB_TASK_MODEL) with deadline lastdline and the server period. */
static inline void SS_activation(SS_level_des *lev)
{
  /* those two defines are for readableness */
  PID p;
  LEVEL m;

  JOB_TASK_MODEL j; /* the guest model */
  // struct timespec ty;

#ifdef DEBUG
  kern_printf("SS_acti ");
#endif

  p = lev->activated;
  m = lev->scheduling_level;

#if 0
  /* if server is active, replenish time already set */
  /* NOTE(review): this disabled block references 'l', which is not in
     scope here — it would not compile if re-enabled as-is. */
  if (lev->server_active == SS_SERVER_NOTACTIVE) {
     lev->server_active = SS_SERVER_ACTIVE;
     /* set replenish time */
     TIMESPEC_ASSIGN(&ty, &proc_table[p].request_time);
     ADDUSEC2TIMESPEC(lev->period, &ty);
     TIMESPEC_ASSIGN(&lev->lastdline, &ty);
#ifdef DEBUG
     kern_printf("RT:%d.%d ",ty.tv_sec,ty.tv_nsec);
#endif
     kern_event_post(&ty, SS_replenish_timer, (void *) l);
  }
#endif

  /* hand the task to the master level as a guest job */
  job_task_default_model(j,lev->lastdline);
  job_task_def_period(j,lev->period);
  level_table[m]->private_insert(m,p,(TASK_MODEL *)&j);

#ifdef DEBUG
  kern_printf("PID:%p lastdl:%d.%d ",p,lev->lastdline.tv_sec,lev->lastdline.tv_nsec);
#endif
}
 
/*+
    Capacity-exhaustion event handler: before delegating to the generic
    capacity_timer, charge the time executed since cap_lasttime to the
    server budget, deactivate the server and post the replenish amount
    (consumed time plus any partial amount accumulated so far).
+*/
static void SS_capacity_timer(void *arg) {

  LEVEL l = (LEVEL)arg;
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec ty;
  int tx;

#ifdef DEBUG
  kern_printf("SS_captim ");
#endif

  /* set replenish amount */
  /* task was running while budget ends */
  lev->server_active = SS_SERVER_NOTACTIVE;
  /* ty = schedule_time - cap_lasttime: time executed in this slot */
  SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
  tx = TIMESPEC2USEC(&ty);
  lev->availCs -= tx;
  if(ssq_inslast(l, tx+lev->replenish_amount) == NIL) {
    kern_printf("SS: no more space to post replenishment\n");
    kern_printf("    You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
    SS_internal_status(l);
    kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
#ifdef DEBUG
    exit(-1);
#endif
  }
  lev->replenish_amount = 0;
  capacity_timer(NULL);
}
 
/* Replenishment event handler: refill availCs with the first queued
   amount (clamped to Cs), recover from an irreversible non-positive
   budget, and — if budget is now positive and no task is activated —
   promote the first waiting task, re-arming the replenish event when
   the server was not active. Raises XINVALID_SS_REPLENISH when the
   event fires with an empty replenish queue. */
static void SS_replenish_timer(void *arg)
{
  LEVEL l = (LEVEL)arg;
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec ty;
  int amount;

#ifdef DEBUG
  kern_printf("SS_reptim ");
#endif

  /* availCs may be <0 because a task executed via a shadow for many time
     lev->activated == NIL only if the prec task was finished and there
     was not any other task to be put in the ready queue
     ... we are now activating the next task */
  if ((amount = ssq_getfirst(l)) != NIL) {
    lev->availCs += amount;
#ifdef DEBUG
    kern_printf("AvaCs=%d ",lev->availCs);
#endif
    if (lev->availCs > lev->Cs) {
      /* This should not be possible. I do so for robustness. */
      lev->availCs = lev->Cs;
#ifdef DEBUG
      kern_printf("SS warning: budget higher then server capacity. Set to Cs.");
#endif
    }
    if (lev->availCs <= 0) {
      /* we can be here if nostop model is used */
#ifdef DEBUG
      kern_printf("WARNING: SS has non positive capacity after replenish.");
#endif
      /* if there isn't pending replenishment and server
         is not active we must refull somehow.
         Otherwise SS remains not active forever */
      if(ssq_isempty(l) && lev->server_active == SS_SERVER_NOTACTIVE) {
        lev->availCs = lev->Cs;
        kern_printf("SS was full replenished due to irreversible non positive budget!!!\n");
        kern_printf("You should review your time extimation for critical sections ;)\n");
      }
    }
  }
  else {
    /* replenish queue is empty */
    kern_printf("Replenish Timer fires but no Replenish Amount defined\n");
    SS_internal_status(l);
    kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
#ifdef DEBUG
    exit(-1);
#endif
  }

  /* budget available and no task in the master level: activate the
     first waiting task (if any) and ask for a reschedule */
  if (lev->availCs > 0 && lev->activated == NIL) {
    if (iq_query_first(&lev->wait) != NIL) {
      lev->activated = iq_getfirst(&lev->wait);
      /* if server is active, replenish time already set */
      if (lev->server_active == SS_SERVER_NOTACTIVE) {
        lev->server_active = SS_SERVER_ACTIVE;
        /* set replenish time = now + server period */
        kern_gettime(&ty);
        ADDUSEC2TIMESPEC(lev->period, &ty);
        TIMESPEC_ASSIGN(&lev->lastdline, &ty);
#ifdef DEBUG
        kern_printf("RT:%d.%d ",ty.tv_sec,ty.tv_nsec);
#endif
        kern_event_post(&ty, SS_replenish_timer, (void *) l);
      }
      SS_activation(lev);
      event_need_reschedule();
    }
  }
}
 
/* Map a task status code to a printable name. Statuses below the
   module-defined base are not translated here. */
static char *SS_status_to_a(WORD status)
{
  if (status < MODULE_STATUS_BASE)
    return "Unavailable"; //status_to_a(status);

  if (status == SS_WAIT)
    return "SS_Wait";

  return "SS_Unknown";
}
 
 
/*-------------------------------------------------------------------*/
 
/*** Level functions ***/
 
/* Diagnostic dump of the SS level: configuration, budget state, the
   currently activated task (if any) and every task in the wait queue. */
void SS_internal_status(LEVEL l)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  PID p = iq_query_first(&lev->wait);

  kern_printf("On-line guarantee : %s\n",
    (lev->flags & SS_ENABLE_GUARANTEE_EDF ||
     lev->flags & SS_ENABLE_GUARANTEE_RM  )?"On":"Off");

  kern_printf("Used Bandwidth : %u/%u\n",lev->U,MAX_BANDWIDTH);
  kern_printf("Period : %d\n",lev->period);
  kern_printf("Capacity : %d\n",lev->Cs);
  kern_printf("Avail capacity : %d\n",lev->availCs);
  kern_printf("Server is %sactive\n",
    (lev->server_active == SS_SERVER_NOTACTIVE ? "not ":""));
  kern_printf("Pending RAs : %d\n",lev->rcount);

  if (lev->activated != NIL)
    kern_printf("Activated: Pid: %d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n",
      lev->activated,
      proc_table[lev->activated].name,
      iq_query_timespec(lev->activated,&lev->wait)->tv_sec,
      iq_query_timespec(lev->activated,&lev->wait)->tv_nsec,
      lev->nact[lev->activated],
      SS_status_to_a(proc_table[lev->activated].status));

  /* walk the wait queue */
  while (p != NIL) {
    kern_printf("Pid: %d\tName: %10s\tStatus: %s\n",
      p,
      proc_table[p].name,
      SS_status_to_a(proc_table[p].status));
    p = iq_query_next(p, &lev->wait);
  }
}
 
/* Background scheduler hook: mark that SS is running in background and
   offer the first waiting task, unless background scheduling is blocked. */
static PID SS_public_schedulerbackground(LEVEL l)
{
  /* the SS catch the background time to exec aperiodic activities */
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

#ifdef DEBUG
  kern_printf("SS_levschbg ");
#endif

  lev->flags |= SS_BACKGROUND;

  return (lev->flags & SS_BACKGROUND_BLOCK)
           ? NIL
           : iq_query_first(&lev->wait);
}
 
/* On-line EDF guarantee: reserve the server bandwidth U from the free
   bandwidth. Returns 1 on success, 0 when not enough bandwidth is left.
   (Enabled only when the appropriate flag was set at registration.) */
static int SS_public_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

#ifdef DEBUG
  kern_printf("SS_levguarEDF ");
#endif

  if (*freebandwidth < lev->U)
    return 0;                 /* not enough spare bandwidth */

  *freebandwidth -= lev->U;
  return 1;
}
 
/* On-line RM guarantee: reserve the server bandwidth U, requiring the
   free bandwidth to stay strictly above RM_MINFREEBANDWIDTH (1-ln2).
   Returns 1 on success, 0 otherwise. */
static int SS_public_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

#ifdef DEBUG
  kern_printf("SS_levguarRM ");
#endif

  if (*freebandwidth <= lev->U + RM_MINFREEBANDWIDTH)
    return 0;                 /* would violate the RM utilization bound */

  *freebandwidth -= lev->U;
  return 1;
}
 
/*-------------------------------------------------------------------*/
 
/*** Task functions ***/
 
 
/* Task creation hook: accept only aperiodic SOFT tasks addressed to this
   level. nact[p] is set to 0 for SAVE_ARRIVALS tasks (pending activations
   are counted) and to -1 for SKIP tasks (activations are never queued).
   Returns 0 on acceptance, -1 on rejection.
   Fix: removed a duplicated, dead 's = (SOFT_TASK_MODEL *)m;' statement
   that repeated the cast already performed above. */
static int SS_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  SOFT_TASK_MODEL *s;

#ifdef DEBUG
  kern_printf("SS_taskcre ");
#endif

  if (m->pclass != SOFT_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;
  s = (SOFT_TASK_MODEL *)m;
  if (s->periodicity != APERIODIC) return -1;

  if (s->arrivals == SAVE_ARRIVALS)
    lev->nact[p] = 0;
  else
    lev->nact[p] = -1;

  return 0; /* OK, also if the task cannot be guaranteed */
}
 
/* Dispatch hook: when not in background, activate the server (posting
   the replenish event at schedule_time + period) on first dispatch,
   forward the dispatch to the master level (or extract the task from
   the wait queue), and arm the capacity timer that stops the task when
   the budget runs out. */
static void SS_public_dispatch(LEVEL l, PID p, int nostop)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec ty;

#ifdef DEBUG
  kern_printf("SS_tdi ");
#endif

  TIMESPEC_ASSIGN(&ty, &schedule_time);
  /* set replenish time */
  if(!BACKGROUND_ON) {
    if(lev->server_active == SS_SERVER_NOTACTIVE) {
      lev->server_active = SS_SERVER_ACTIVE;
      ADDUSEC2TIMESPEC(lev->period,&ty);
      TIMESPEC_ASSIGN(&lev->lastdline, &ty);
#ifdef DEBUG
      kern_printf("tdiPID:%d RT:%d.%d ",p,ty.tv_sec,ty.tv_nsec);
#endif
      kern_event_post(&ty, SS_replenish_timer,(void *) l);
    }
  }
#ifdef DEBUG
  if (nostop) kern_printf("NOSTOP!!! ");
#endif

  /* there is at least one task ready inserted in an RM or similar level.
     Note that we can't check the status because the scheduler sets it
     to exe before calling task_dispatch.
     We have to check lev->activated != p instead */
  if (lev->activated != p) {
    iq_extract(p, &lev->wait);
#ifdef DEBUG
    kern_printf("extr task:%d ",p);
#endif
  }
  else {
#ifdef DEBUG
    if (nostop) kern_printf("(gd status=%d)",proc_table[p].status);
#endif
    level_table[lev->scheduling_level]->
      private_dispatch(lev->scheduling_level,p,nostop);
  }

  /* set capacity timer */
  if (!nostop && !BACKGROUND_ON) {
    TIMESPEC_ASSIGN(&ty, &schedule_time);
    // kern_printf("ty:%d.%d ",ty.tv_sec,ty.tv_nsec);
    /* fire at schedule_time + remaining budget (0 if already exhausted) */
    ADDUSEC2TIMESPEC((lev->availCs<=0 ? 0:lev->availCs),&ty);
    // kern_printf("avCs:%d ty:%d.%d ",lev->availCs,ty.tv_sec,ty.tv_nsec);
    /* stop the task if budget ends */
#ifdef DEBUG
    kern_printf("PID:%d ST=%d.%d ",p,ty.tv_sec,ty.tv_nsec);
#endif
    cap_timer = kern_event_post(&ty, SS_capacity_timer,(void *) l);
  }
}
 
/* Epilogue hook: charge the executed time to the server budget (unless
   running in background) and accumulate it in replenish_amount. If the
   budget is exhausted the task is pulled back into the wait queue and
   the replenish amount is posted; otherwise the task is either handed
   back to the master level (preemption) or re-queued as waiting. */
static void SS_public_epilogue(LEVEL l, PID p) {

  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec ty;
  int tx;

#ifdef DEBUG
  kern_printf("SS_tep ");
#endif
  /* update the server capacity */
  if (BACKGROUND_ON)
    lev->flags &= ~SS_BACKGROUND;
  else {
    /* ty = schedule_time - cap_lasttime: time consumed in this slot */
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    // kern_printf("ty:%d.%d ",ty.tv_sec,ty.tv_nsec);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
    // kern_printf("avCs:%d ty:%d.%d ",lev->availCs,ty.tv_sec,ty.tv_nsec);
    lev->replenish_amount += tx;
#ifdef DEBUG
    kern_printf("RA:%d ",lev->replenish_amount);
#endif
  }

  /* check if the server capacity is finished... */
  if (lev->availCs <= 0) {
    /* The server slice has finished... do the task_end!!!
       A first version of the module used the task_endcycle, but it was
       not conceptually correct because the task didn't stop because it
       finished all the work, but because the server didn't have budget!
       So, if the task_endcycle is called, the task remain into the
       master level, and we can't wake him up if, for example, another
       task point the shadow to it!!! */

    /* set replenish amount */
    if(!(BACKGROUND_ON)) {
      if(lev->server_active == SS_SERVER_ACTIVE) {
        lev->server_active = SS_SERVER_NOTACTIVE;
        if(ssq_inslast(l, lev->replenish_amount) == NIL) {
          kern_printf("SS: no more space to post replenishment\n");
          kern_printf("You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
          SS_internal_status(l);
          kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
#ifdef DEBUG
          exit(-1);
#endif
        }
        lev->replenish_amount = 0;
      }
    }

    /* remove the task from the master level and park it as waiting */
    if (lev->activated == p)
      level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p);

    iq_insertfirst(p, &lev->wait);
    proc_table[p].status = SS_WAIT;
    lev->activated = NIL;
  }
  else {
    /* The task has been preempted.
       It returns into the ready queue or to the
       wait queue by calling the private_epilogue... */

    if (lev->activated == p) { /* goes into ready queue */
      level_table[ lev->scheduling_level ]->
        private_epilogue(lev->scheduling_level,p);
    }
    else { /* goes into wait queue */
      iq_insertfirst(p, &lev->wait);
      proc_table[p].status = SS_WAIT;
    }
  }
}
 
/* Activation: records a new task arrival.  Pending arrivals are counted
   in nact[], a sleeping task is either handed to the master level (when
   the server has budget and no task is activated) or parked in the SS
   wait queue; any other state is rejected with a diagnostic print. */
static void SS_public_activate(LEVEL l, PID p, struct timespec *t)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

#ifdef DEBUG
  kern_printf("SS_tacti ");
#endif

  if (lev->activated == p || proc_table[p].status == SS_WAIT) {
    /* already in the server: just count the activation (nact == -1
       marks tasks whose activations are not saved) */
    if (lev->nact[p] != -1) lev->nact[p]++;
  }
  else if (proc_table[p].status == SLEEP) {
    if (lev->activated == NIL && lev->availCs > 0) {
      if(!BACKGROUND_ON) {
        /* if server is active, replenish time already set */
        if (lev->server_active == SS_SERVER_NOTACTIVE) {
          lev->server_active = SS_SERVER_ACTIVE;
          /* set replenish time one server period after the arrival */
          ADDUSEC2TIMESPEC(lev->period, t);
          TIMESPEC_ASSIGN(&lev->lastdline, t);
#ifdef DEBUG
          kern_printf("RT=%d.%d ",t->tv_sec,t->tv_nsec);
#endif
          kern_event_post(t, SS_replenish_timer, (void *) l);
        }
      }
      lev->activated = p;
      SS_activation(lev);
    }
    else {
      /* server busy or out of budget: queue the task */
      iq_insertlast(p, &lev->wait);
      proc_table[p].status = SS_WAIT;
    }
  }
  else {
    /* activation in an unexpected task state: rejected */
    kern_printf("SS_REJ%d %d %d %d ",
                p,
                proc_table[p].status,
                lev->activated,
                lev->wait.first);
    return;
  }
}
 
/* Unblock: the task re-enters the server through the wait queue. */
static void SS_public_unblock(LEVEL l, PID p)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

#ifdef DEBUG
  kern_printf("SS_tins ");
#endif

  /* leave the background-block condition and detach any activation */
  lev->flags &= ~SS_BACKGROUND_BLOCK;
  lev->activated = NIL;

  /* when we reinsert the task into the system, the server capacity
     is always 0 because nobody executes with the SS before... */
  proc_table[p].status = SS_WAIT;
  iq_insertfirst(p, &lev->wait);
}
 
/* Block: the task leaves the server; post the pending replenishment,
   zero the residual capacity and pull the task out of the master level. */
static void SS_public_block(LEVEL l, PID p)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

#ifdef DEBUG
  kern_printf("SS_textr ");
#endif

  /* set replenish amount (only when not running in background) */
  if (!BACKGROUND_ON)
    SS_set_ra(l);

  /* clear the server capacity */
  lev->availCs = 0;

  lev->flags |= SS_BACKGROUND_BLOCK;

  if (lev->activated == p)
    level_table[lev->scheduling_level]->
      private_extract(lev->scheduling_level, p);
}
 
/* End-of-cycle message: charges the consumed time to the server,
   removes the task from the master level (or the wait queue), honours
   any saved activation, and activates the next queued task.
   Returns 0. */
static int SS_public_message(LEVEL l, PID p, void *m)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec ty;
  int tx;

#ifdef DEBUG
  kern_printf("SS_tendcy ");
#endif

  /* update the server capacity */
  if (BACKGROUND_ON)
    lev->flags &= ~SS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
    lev->replenish_amount += tx;
#ifdef DEBUG
    /* BUGFIX: the format string has two conversions but only one
       argument was passed (undefined behavior); add the missing PID
       argument, as done in SS_public_end */
    kern_printf("PID:%d RA=%d ",p,lev->replenish_amount);
#endif
  }

  /* detach the finished activation */
  if (lev->activated == p)
    level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p);
  else
    iq_extract(p, &lev->wait);

  if (lev->nact[p] > 0) {
    /* a saved activation is pending: consume it and re-queue */
    lev->nact[p]--;
    iq_insertlast(p, &lev->wait);
    proc_table[p].status = SS_WAIT;
  }
  else {
    proc_table[p].status = SLEEP;
  }

  /* hand the next queued task (if any) to the master level */
  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated != NIL) {
    SS_activation(lev);
  }
  else {
    /* No more task to schedule; set replenish amount */
    if(!(BACKGROUND_ON)) {
      SS_set_ra(l);
    }
  }

  jet_update_endcycle(); /* Update the Jet data... */
  TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);

  return 0;
}
 
/* Task end: charges the consumed time to the server, frees the task
   descriptor and activates the next queued task (if any). */
static void SS_public_end(LEVEL l, PID p)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec ty;
  int tx;

#ifdef DEBUG
  kern_printf("SS_tend ");
#endif

  /* update the server capacity */
  if (BACKGROUND_ON)
    lev->flags &= ~SS_BACKGROUND;
  else {
    /* charge the elapsed time and accumulate it for replenishment */
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
    lev->replenish_amount += tx;
#ifdef DEBUG
    kern_printf("PID:%d RA=%d ",p,lev->replenish_amount);
#endif
  }

  /* remove the dying task from the master level */
  if (lev->activated == p)
    level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p);

  /* release the task descriptor */
  proc_table[p].status = FREE;
  iq_insertfirst(p,&freedesc);

  /* hand the next queued task (if any) to the master level */
  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated != NIL) {
    SS_activation(lev);
  }
  else {
    if(!(BACKGROUND_ON)){
      /* No more task to schedule; set replenish amount */
      SS_set_ra(l);
    }
  }
}
 
/*-------------------------------------------------------------------*/
 
/*** Registration functions ***/
 
 
/*+ Registration function:
int flags the init flags ... see SS.h +*/
/*+ Registration function:
    int flags    the init flags ... see SS.h
    LEVEL master the master level the SS tasks are inserted into
    int Cs       the server capacity (budget)
    int per      the server period
    Returns the level number at which the module has been registered. +*/
LEVEL SS_register_level(int flags, LEVEL master, int Cs, int per)
{
  LEVEL l;            /* the level that we register */
  SS_level_des *lev;  /* for readableness only */
  PID i;              /* a counter */

  /* request an entry in the level_table */
  l = level_alloc_descriptor(sizeof(SS_level_des));

  printk("SS_register_level\n");

  lev = (SS_level_des *)level_table[l];

  /* fill the standard descriptor */

  /* the background scheduler is installed only on request */
  if (flags & SS_ENABLE_BACKGROUND)
    lev->l.public_scheduler = SS_public_schedulerbackground;

  /* choose the guarantee algorithm (EDF-based, RM-based or none) */
  if (flags & SS_ENABLE_GUARANTEE_EDF)
    lev->l.public_guarantee = SS_public_guaranteeEDF;
  else if (flags & SS_ENABLE_GUARANTEE_RM)
    lev->l.public_guarantee = SS_public_guaranteeRM;
  else
    lev->l.public_guarantee = NULL;

  lev->l.public_create    = SS_public_create;
  lev->l.public_end       = SS_public_end;
  lev->l.public_dispatch  = SS_public_dispatch;
  lev->l.public_epilogue  = SS_public_epilogue;
  lev->l.public_activate  = SS_public_activate;
  lev->l.public_unblock   = SS_public_unblock;
  lev->l.public_block     = SS_public_block;
  lev->l.public_message   = SS_public_message;

  /* fill the SS descriptor part */

  /* -1 marks tasks with no saved activation counting */
  for (i=0; i<MAX_PROC; i++)
    lev->nact[i] = -1;

  lev->Cs = Cs;
  lev->availCs = Cs;

  lev->period = per;

  iq_init(&lev->wait, &freedesc, 0);
  lev->activated = NIL;

  /* server utilization = Cs/per, scaled by MAX_BANDWIDTH */
  lev->U = (MAX_BANDWIDTH / per) * Cs;
  lev->scheduling_level = master;
  /* only the three low-order option bits are kept */
  lev->flags = flags & 0x07;
  /* This is superfluos. I do it for robustness */
  for (i=0;i<SS_MAX_REPLENISH;lev->replenishment[i++]=0);
  /* Initialize replenishment stuff */
  lev->rfirst=0;
  lev->rlast=0;
  lev->rcount=0;
  lev->replenish_amount=0;
  lev->server_active=SS_SERVER_NOTACTIVE;

  return l;
}
 
/*+ Returns the bandwidth reserved for this SS instance +*/
bandwidth_t SS_usedbandwidth(LEVEL l)
{
  return ((SS_level_des *)level_table[l])->U;
}
 
/*+ Returns the residual server capacity (may be negative) +*/
int SS_availCs(LEVEL l) {
  return ((SS_level_des *)level_table[l])->availCs;
}
/shark/trunk/modules/ss/subdir.mk
0,0 → 1,0
OBJS += ss/ss.o
/shark/trunk/modules/tbs/tbs/tbs.h
0,0 → 1,157
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: tbs.h,v 1.1 2005-02-25 10:53:02 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:53:02 $
------------
 
This file contains the aperiodic server TBS (Total Bandwidth Server)
 
Title:
TBS (Total Bandwidth Server)
 
Task Models Accepted:
SOFT_TASK_MODEL - Aperiodic Tasks
wcet field must be != 0
met field is ignored
period field is ignored
periodicity must be APERIODIC
arrivals can be either SAVE or SKIP
 
Description:
This module schedules its tasks following the TBS scheme.
Each task has a deadline assigned with the TBS scheme,
 
wcet
d = max(r , d ) + ----
k k k-1 Us
 
The tasks are inserted in an EDF level (or similar) with a JOB_TASK_MODEL,
and the TBS level expects that the task is scheduled with the absolute
deadline passed in the model.
 
The task guarantee is based on the factor utilization approach.
The theory guarantees that the task set is schedulable if
Up + Us <= 1
so it is sufficient to add the Us to the bandwidth used by the upper
levels (we suppose that the levels with level number < of the current
can guarantee their task sets basing on the same formula...
 
All the tasks are put in a FIFO (FCFS) queue and at a time only the first
task in the queue is put in the upper level.
 
Exceptions raised:
XUNVALID_GUEST
This level doesn't support guests. When a guest operation
is called, the exception is raised.
 
These exceptions are pclass-dependent...
XDEADLINE_MISS
If a task miss his deadline, the exception is raised.
Normally, a TBS task can't cause the raise of such exception because
if it really use more time than declared a XWCET_VIOLATION is raised
instead.
 
XWCET_VIOLATION
If a task doesn't end the current cycle before it consumes the wcet,
an exception is raised, and the task is put in the TBS_WCET_VIOLATED
state. To reactivate it, use TBS_task_activate via task_activate or
manage directly the TBS data structure. Note that if the exception
is not handled properly, an XDEADLINE_MISS exception will also be
raised at the absolute deadline...
 
Restrictions & special features:
- This level doesn't manage the main task.
- At init time we have to specify:
. The bandwidth used by the server
. some flags
. wcet check activated
(If a task require more time than declared, it is stopped and put in
the state TBS_WCET_VIOLATED; a XWCET_VIOLATION exception is raised)
. guarantee check
(when all task are created the system will check that the task_set
will not use more than the available bandwidth)
- The level don't use the priority field.
- A function to return the used bandwidth of the level is provided.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#ifndef __TBS_H__
#define __TBS_H__
 
#include <ll/ll.h>
#include <kernel/config.h>
#include <sys/types.h>
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/*+ flags... +*/
#define TBS_DISABLE_ALL 0
#define TBS_ENABLE_WCET_CHECK 1 /*+ Wcet check enabled +*/
#define TBS_ENABLE_GUARANTEE 2 /*+ Task Guarantee enabled +*/
#define TBS_ENABLE_ALL 3 /*+ All flags enabled +*/
 
 
/*+ Registration function:
int flags Options to be used in this level instance...
LEVEL master the level that must be used as master level for the
TBS tasks
int num,den used to compute the TBS bandwidth (num/den) +*/
void TBS_register_level(int flags, LEVEL master, int num, int den);
 
/*+ Returns the used bandwidth of a level +*/
bandwidth_t TBS_usedbandwidth(LEVEL l);
 
/*+ Returns the number of pending activations of a task, or -1 if the level
is wrong.
No control is done if the task is not a TBS task! +*/
int TBS_get_nact(LEVEL l, PID p);
 
__END_DECLS
#endif
/shark/trunk/modules/tbs/tbs.c
0,0 → 1,419
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: tbs.c,v 1.1 2005-02-25 10:43:38 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:43:38 $
------------
 
This file contains the aperiodic server TBS (Total Bandwidth Server)
 
Read tbs.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <tbs/tbs/tbs.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
/*+ 4 debug purposes +*/
#undef TBS_TEST
 
/*+ Status used in the level +*/
#define TBS_WCET_VIOLATED APER_STATUS_BASE+2 /*+ when wcet is finished +*/
#define TBS_WAIT APER_STATUS_BASE /*+ waiting the service +*/
 
/*+ task flags +*/
#define TBS_SAVE_ARRIVALS 1
 
/*+ the level redefinition for the Total Bandwidth Server level +*/
typedef struct {
level_des l; /*+ the standard level descriptor +*/
 
/* The wcet are stored in the task descriptor's priority
field. */
 
int nact[MAX_PROC]; /*+ used to record activations +*/
BYTE flag[MAX_PROC];
 
struct timespec lastdline; /*+ the last deadline assigned to
a TBS task +*/
 
IQUEUE wait; /*+ the wait queue of the TBS +*/
PID activated; /*+ the task inserted in another queue +*/
 
int flags; /*+ the init flags... +*/
 
bandwidth_t U; /*+ the used bandwidth by the server +*/
int band_num;
int band_den;
 
LEVEL scheduling_level;
 
} TBS_level_des;
 
#ifdef TESTG
#include "drivers/glib.h"
#endif
 
/* This static function activates the task pointed by lev->activated) */
/* This static function activates the task pointed by lev->activated:
   it assigns the task a TBS deadline (wcet/Us past the later of the
   arrival and the previous deadline, already merged into lastdline by
   the caller) and inserts it into the master level as a guest job. */
static __inline__ void TBS_activation(TBS_level_des *lev)
{
  PID p;           /* for readableness */
  JOB_TASK_MODEL j;  /* the guest model */
  TIME drel;       /* the relative deadline of the task */
  LEVEL m;         /* the master level... only for readableness */

#ifdef TESTG
  TIME x;
  extern TIME starttime;
#endif

  p = lev->activated;
  /* we compute a suitable deadline for the task:
     drel = wcet / Us, with Us = band_num/band_den */
  drel = (proc_table[p].wcet * lev->band_den) / lev->band_num;

  ADDUSEC2TIMESPEC(drel, &lev->lastdline);

#ifdef TESTG
  /* debug graphics: plot the new deadline on screen */
  if (starttime) {
    x = ((lev->lastdline.tv_sec*1000000+lev->lastdline.tv_nsec/1000)/5000 - starttime) + 20;
    if (x<640)
      grx_plot(x, 15, 7);
  }
#endif

  /* and we insert the task in another level */
  m = lev->scheduling_level;
  job_task_default_model(j,lev->lastdline);
  level_table[m]->private_insert(m,p,(TASK_MODEL *)&j);

#ifdef TBS_TEST
  kern_printf("TBS_activation: lastdline %ds %dns\n",lev->lastdline.tv_sec,lev->lastdline.tv_nsec);
#endif
}
 
/* This static function reclaims the unused time of the task p */
/* This static function reclaims the unused time of the task p:
   the leftover budget, scaled by 1/Us, is subtracted from the last
   assigned server deadline. */
static __inline__ void TBS_bandwidth_reclaiming(TBS_level_des *lev, PID p)
{
  TIME reclaimed;
  struct timespec delta, newdline;

  /* unused computation time converted into server time units */
  reclaimed = (proc_table[p].avail_time * lev->band_den) / lev->band_num;

  delta.tv_sec  = reclaimed / 1000000;
  delta.tv_nsec = (reclaimed % 1000000) * 1000;

  /* pull the server deadline back by the reclaimed amount */
  SUBTIMESPEC(&lev->lastdline, &delta, &newdline);
  TIMESPEC_ASSIGN(&lev->lastdline, &newdline);

#ifdef TBS_TEST
  kern_printf("TBS_bandwidth_reclaiming: lastdline %ds %dns, reclaimed %d, avail %d\n",
              lev->lastdline.tv_sec, lev->lastdline.tv_nsec, reclaimed, proc_table[p].avail_time);
#endif
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
/* The on-line guarantee is enabled only if the appropriate flag is set:
   accepts (returning 1 and subtracting the server bandwidth) iff the
   remaining free bandwidth covers this server's utilization. */
static int TBS_public_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);

  if (*freebandwidth < lev->U)
    return 0;

  *freebandwidth -= lev->U;
  return 1;
}
 
/* Task creation: only aperiodic SOFT tasks with a declared (non-zero)
   wcet addressed to this level are accepted.  Returns 0 on success,
   -1 on model rejection. */
static int TBS_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);
  SOFT_TASK_MODEL *soft;

  /* if the TBS_task_create is called, then the pclass must be a
     valid pclass. */
  if (m->pclass != SOFT_PCLASS)
    return -1;
  if (m->level != 0 && m->level != l)
    return -1;

  soft = (SOFT_TASK_MODEL *)m;
  if (soft->wcet == 0 || soft->periodicity != APERIODIC)
    return -1;

  proc_table[p].wcet = soft->wcet;

  /* Enable wcet check */
  if (lev->flags & TBS_ENABLE_WCET_CHECK) {
    proc_table[p].avail_time = soft->wcet;
    proc_table[p].control |= CONTROL_CAP;
  }

  lev->nact[p] = 0;
  if (soft->arrivals == SAVE_ARRIVALS)
    lev->flag[p] = TBS_SAVE_ARRIVALS;

  return 0; /* OK, also if the task cannot be guaranteed... */
}
 
/* Dispatch: the activated task lives in the master (EDF-like) level,
   so the dispatch request is simply forwarded there. */
static void TBS_public_dispatch(LEVEL l, PID p, int nostop)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);
  LEVEL m = lev->scheduling_level;

  level_table[m]->private_dispatch(m, p, nostop);
}
 
/* Epilogue: if wcet checking is on and the budget is exhausted, the
   task is stopped with a XWCET_VIOLATION, removed from the master
   level, the server deadline is adjusted and the next queued task is
   activated; otherwise the preemption is forwarded to the master. */
static void TBS_public_epilogue(LEVEL l, PID p)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);

  /* check if the wcet is finished... */
  if ((lev->flags & TBS_ENABLE_WCET_CHECK) && proc_table[p].avail_time <= 0) {
    /* if it is, raise a XWCET_VIOLATION exception */
    kern_raise(XWCET_VIOLATION,p);
    proc_table[p].status = TBS_WCET_VIOLATED;

    /* the current task have to die in the scheduling queue, and another
       have to be put in place... this code is identical to the
       TBS_task_end */
    level_table[ lev->scheduling_level ]->
      private_extract(lev->scheduling_level,p);

    /* we reclaim an avail time that can be <0 due to the timer
       approximations -> we have to postpone the deadline a little!
       we can use the ADDUSEC2TIMESPEC because the time postponed is
       less than 55ms */
    ADDUSEC2TIMESPEC((-proc_table[p].avail_time * lev->band_den)
                     / lev->band_num, &lev->lastdline);

#ifdef TBS_TEST
    kern_printf("TBS_task_epilogue: Deadline posponed to %ds %dns\n",
                lev->lastdline.tv_sec, lev->lastdline.tv_nsec);
#endif

    /* activate the next task in the FCFS queue, if any */
    lev->activated = iq_getfirst(&lev->wait);
    if (lev->activated != NIL)
      TBS_activation(lev);
  }
  else
    /* the task has been preempted. it returns into the ready queue by
       calling the guest_epilogue... */
    level_table[ lev->scheduling_level ]->
      private_epilogue(lev->scheduling_level,p);
}
 
/* Activation: a sleeping (or wcet-violated) task is given a TBS
   deadline and either activated immediately (when the server is idle)
   or queued FCFS; arrivals in other states are counted only when the
   task was created with SAVE_ARRIVALS, otherwise silently dropped. */
static void TBS_public_activate(LEVEL l, PID p, struct timespec *t)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);

  if (proc_table[p].status == SLEEP ||
      proc_table[p].status == TBS_WCET_VIOLATED) {

    /* TBS rule: the new deadline starts from max(arrival, lastdline) */
    if (TIMESPEC_A_GT_B(t, &lev->lastdline))
      TIMESPEC_ASSIGN(&lev->lastdline, t );


    if (lev->activated == NIL) {
      /* This is the first task in the level, so we activate it immediately */
      lev->activated = p;
      TBS_activation(lev);
    }
    else {
      proc_table[p].status = TBS_WAIT;
      iq_insertlast(p, &lev->wait);
    }
  }
  else if (lev->flag[p] & TBS_SAVE_ARRIVALS)
    lev->nact[p]++;
  /* else
     kern_printf("TBSREJ!!!");*/
}
 
/* Unblock: reinsert the task into the master level as a guest job
   with the current server deadline. */
static void TBS_public_unblock(LEVEL l, PID p)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);
  LEVEL m = lev->scheduling_level;
  JOB_TASK_MODEL job;

  job_task_default_model(job, lev->lastdline);
  level_table[m]->private_insert(m, p, (TASK_MODEL *)&job);
}
 
/* Block: pull the task out of the master level. */
static void TBS_public_block(LEVEL l, PID p)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);
  LEVEL m = lev->scheduling_level;

  level_table[m]->private_extract(m, p);
}
 
/* End-of-cycle message: closes the current activation, reclaims the
   unused budget, honours saved activations and starts the next queued
   task.  Returns 0. */
static int TBS_public_message(LEVEL l, PID p, void *m)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);

  /* a task activation is finished, but we are using a JOB_TASK_MODEL
     that implements a single activation, so we have to call
     the guest_end, that represents a single activation... */
  level_table[ lev->scheduling_level ]->
    private_extract(lev->scheduling_level,p);

  /* pull the server deadline back by the unused budget */
  TBS_bandwidth_reclaiming(lev,p);

  /* we reset the capacity counters... */
  if (lev->flags & TBS_ENABLE_WCET_CHECK)
    proc_table[p].avail_time = proc_table[p].wcet;

  if (lev->nact[p]) {
    // lev->nact[p] can be >0 only if the SAVE_ARRIVALS bit is set
    lev->nact[p]--;
    proc_table[p].status = TBS_WAIT;
    iq_insertlast(p, &lev->wait);
  }
  else
    proc_table[p].status = SLEEP;

  /* activate the next task in the FCFS queue, if any */
  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated != NIL)
    TBS_activation(lev);

  jet_update_endcycle(); /* Update the Jet data... */
  TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);
  return 0;
}
 
/* Task end: removes the dying task from the master level, reclaims its
   unused budget, frees the descriptor and activates the next queued
   task (if any). */
static void TBS_public_end(LEVEL l, PID p)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);

  level_table[ lev->scheduling_level ]->
    private_extract(lev->scheduling_level,p);

  /* pull the server deadline back by the unused budget */
  TBS_bandwidth_reclaiming(lev,p);

  /* release the task descriptor */
  proc_table[p].status = FREE;
  iq_insertfirst(p,&freedesc);

  /* activate the next task in the FCFS queue, if any */
  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated != NIL)
    TBS_activation(lev);
}
 
/* Registration functions */
 
/*+ Registration function:
int flags the init flags ... see TBS.h +*/
/*+ Registration function:
    int flags    the init flags ... see tbs.h
    LEVEL master the level that hosts the TBS tasks (EDF or similar)
    int num,den  the TBS bandwidth Us = num/den +*/
void TBS_register_level(int flags, LEVEL master, int num, int den)
{
  LEVEL l;             /* the level that we register */
  TBS_level_des *lev;  /* for readableness only */
  PID i;               /* a counter */

  printk("TBS_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor(sizeof(TBS_level_des));

  lev = (TBS_level_des *)level_table[l];

  /* fill the standard descriptor */
  /* BUGFIX: the original code unconditionally overwrote
     public_guarantee right after this if/else, so the on-line
     guarantee was always installed even when TBS_ENABLE_GUARANTEE was
     not requested; the flag is now honoured (cfr. SS_register_level) */
  if (flags & TBS_ENABLE_GUARANTEE)
    lev->l.public_guarantee = TBS_public_guarantee;
  else
    lev->l.public_guarantee = NULL;

  lev->l.public_create   = TBS_public_create;
  lev->l.public_end      = TBS_public_end;
  lev->l.public_dispatch = TBS_public_dispatch;
  lev->l.public_epilogue = TBS_public_epilogue;
  lev->l.public_activate = TBS_public_activate;
  lev->l.public_unblock  = TBS_public_unblock;
  lev->l.public_block    = TBS_public_block;
  lev->l.public_message  = TBS_public_message;

  /* fill the TBS descriptor part */

  for (i = 0; i < MAX_PROC; i++) {
    lev->nact[i] = 0;
    lev->flag[i] = 0;
  }

  NULL_TIMESPEC(&lev->lastdline);

  iq_init(&lev->wait, &freedesc, 0);
  lev->activated = NIL;

  /* server utilization Us = num/den, scaled by MAX_BANDWIDTH */
  lev->U = (MAX_BANDWIDTH / den) * num;
  lev->band_num = num;
  lev->band_den = den;

  lev->scheduling_level = master;

  /* only the three low-order option bits are kept */
  lev->flags = flags & 0x07;
}
 
/*+ Returns the bandwidth reserved for this TBS instance +*/
bandwidth_t TBS_usedbandwidth(LEVEL l)
{
  return ((TBS_level_des *)level_table[l])->U;
}
 
/*+ Returns the number of pending (saved) activations of task p.
    NOTE(review): no level-validity check is performed here, although
    the header documents a -1 return for a wrong level — confirm. +*/
int TBS_get_nact(LEVEL l, PID p)
{
  return ((TBS_level_des *)level_table[l])->nact[p];
}
 
/shark/trunk/modules/tbs/subdir.mk
0,0 → 1,0
OBJS += tbs/tbs.o
/shark/trunk/modules/dummy/dummy/dummy.h
0,0 → 1,101
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: dummy.h,v 1.1 2005-02-25 10:55:09 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:55:09 $
------------
 
This file contains the scheduling module RR (Round Robin)
 
Title:
DUMMY
 
Task Models Accepted:
DUMMY_TASK_MODEL - Dummy process (not usable)
 
Description:
This module creates the dummy task, which is a special task that
does nothing.
 
Exceptions raised:
XUNVALID_GUEST
This level doesn't support guests. When a guest operation
is called, the exception is raised.
XUNVALID_DUMMY_OP
The dummy task can't be created, or activated, and so on...
 
Restrictions & special features:
- the task model DUMMY_TASK_MODEL can be used only at init time
to register the dummy process into the system.
- if __HLT_WORKS__ defined in this header file, the dummy task can
perform a hlt instruction to save power...
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#ifndef __DUMMY_H__
#define __DUMMY_H__
 
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/*+
On upper Intel CPUs it is possible to avoid CPU power consumption
when the system is idle issuing the hlt instruction.
This is often available on many 32 bit CPUs...
If it is, simply define the following!!!
+*/
#define __HLT_WORKS__
 
/*+ Registration function
 
returns the level number at which the module has been registered.
+*/
LEVEL dummy_register_level();
 
__END_DECLS
#endif
/shark/trunk/modules/dummy/dummy.c
0,0 → 1,184
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: dummy.c,v 1.1 2005-02-25 10:55:09 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:55:09 $
------------
 
This file contains the Dummy scheduling module
 
Read dummy.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
#include <dummy/dummy/dummy.h>
#include <ll/ll.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/config.h>
#include <sys/types.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
 
 
/*+ the level redefinition for the Dummy level +*/
typedef struct {
level_des l; /*+ the standard level descriptor +*/
 
PID dummy; /*+ the dummy task... +*/
} dummy_level_des;
 
 
/* Scheduler: the dummy level always proposes its single dummy task. */
static PID dummy_public_scheduler(LEVEL l)
{
  return ((dummy_level_des *)level_table[l])->dummy;
}
 
/* Task creation: accepts only the DUMMY model addressed to this level,
   and only while no dummy task has been installed yet. */
static int dummy_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  dummy_level_des *lev = (dummy_level_des *)(level_table[l]);

  if (m->pclass != DUMMY_PCLASS || (m->level != 0 && m->level != l))
    return -1;
  if (lev->dummy != -1)
    return -1;

  /* the dummy level doesn't introduce any new field in the TASK_MODEL
     so, all initialization stuffs are done by the task_create.
     the task state is set at SLEEP by the general task_create */
  return 0; /* OK */
}
 
/* Dispatch: intentionally empty.
   nothing... the dummy hangs the cpu waiting for interrupts... */
static void dummy_public_dispatch(LEVEL l, PID p, int nostop)
{
}
 
/* Epilogue: the dummy task is simply put back to SLEEP. */
static void dummy_public_epilogue(LEVEL l, PID p)
{
  proc_table[p].status = SLEEP; /* Paranoia */
}
 
/*+ Dummy task must be present & cannot be killed; +*/
/*+ Dummy task body: an endless idle loop; must be present & cannot be
    killed.  With __HLT_WORKS__ the loop issues hlt to save power. +*/
static TASK dummy()
{
  /*
    It is possible to Halt the CPU & avoid consumption if idle
    cycle are intercepted with hlt instructions!
    It seems that some CPU have buggy hlt instruction or they
    have not it at all! So, if available, use the hlt facility!!
  */
#ifdef __HLT_WORKS__
  for(;;) {
    // kern_printf("?");
    hlt();
  }
#else
  for(;;);// kern_printf("?");
#endif
}
 
/* Registration functions */
 
/*+ This init function install the dummy task +*/
/*+ This init function installs the dummy task (run at RUNLEVEL_INIT).
    l is the level number, passed as an opaque pointer. +*/
static void dummy_create(void *l)
{
  LEVEL lev;
  PID p;
  DUMMY_TASK_MODEL m;

  lev = (LEVEL)l;

  dummy_task_default_model(m);
  dummy_task_def_level(m,lev);
  dummy_task_def_system(m);
  dummy_task_def_nokill(m);
  dummy_task_def_ctrl_jet(m);

  ((dummy_level_des *)level_table[lev])->dummy = p =
    task_create("Dummy", dummy, &m, NULL);

  if (p == NIL) {
    printk("\nPanic!!! can't create dummy task...\n");
    /* BUGFIX: the original code fell through and wrote
       proc_table[p].sigmask with p == NIL (out-of-bounds index);
       bail out instead */
    return;
  }

  /* dummy must block all signals... */
  proc_table[p].sigmask = 0xFFFFFFFF;
}
 
 
/*+ Registration function: takes no parameters and returns the level
number at which the dummy module has been registered +*/
/*+ Registration function: installs the dummy scheduling level and
    schedules the creation of the dummy task at RUNLEVEL_INIT.
    Returns the level number at which the module has been registered.
    (Signature tightened from the unprototyped `()` to `(void)`;
    compatible with the old-style declaration in dummy.h.) +*/
LEVEL dummy_register_level(void)
{
  LEVEL l;               /* the level that we register */
  dummy_level_des *lev;  /* for readableness only */

  printk("dummy_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor(sizeof(dummy_level_des));

  lev = (dummy_level_des *)level_table[l];

  /* fill the standard descriptor */
  lev->l.public_scheduler = dummy_public_scheduler;
  lev->l.public_guarantee = NULL;
  lev->l.public_create    = dummy_public_create;
  lev->l.public_dispatch  = dummy_public_dispatch;
  lev->l.public_epilogue  = dummy_public_epilogue;

  /* the dummy process will be created at init_time.
     see also dummy_level_accept_model,dummy_create */
  lev->dummy = -1;

  sys_atrunlevel(dummy_create,(void *) l, RUNLEVEL_INIT);

  return l;
}
/shark/trunk/modules/dummy/subdir.mk
0,0 → 1,0
OBJS += dummy/dummy.o
/shark/trunk/modules/nop/nop/nop.h
0,0 → 1,90
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: nop.h,v 1.1 2005-02-25 10:53:02 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:53:02 $
------------
 
This file contains the No Protocol (NOP) implementation of mutexes
 
Title:
NOP (Binary Semaphores)
 
Resource Models Accepted:
None
 
Description:
This module implements a mutex interface using extraction and insertion
into scheduling queues.
 
Exceptions raised:
none
 
Restrictions & special features:
- This module is NOT Posix compliant
- This module can manage any number of NOP mutexes.
- A NOP mutex can be statically allocated. To do this, the init function
have to define a macro that puts this information in the mutex
descriptor: mutexlevel = <NOP resource level>; opt = NULL;
for example, if the NOP module is registered at level 1, the macro is
like:
#define MUTEX_INITIALIZER {1,(void *)NULL}
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
 
#ifndef __NOP_H__
#define __NOP_H__
 
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
RLEVEL NOP_register_module(void);
 
__END_DECLS
#endif
/shark/trunk/modules/nop/nop.c
0,0 → 1,270
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: nop.c,v 1.1 2005-02-25 10:40:58 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:40:58 $
------------
 
Binary Semaphores. see nop.h for more details...
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <nop/nop/nop.h>
 
#include <ll/ll.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/* The NOP resource level descriptor: only the generic mutex interface,
   NOP keeps no extra per-level state. */
typedef struct {
  mutex_resource_des m; /*+ the mutex interface +*/
} NOP_mutex_resource_des;
 
 
/* this is the structure normally pointed by the opt field in the
   mutex_t structure */
typedef struct {
  PID owner;      /* task currently holding the mutex; NIL when free */
  IQUEUE blocked; /* queue of tasks blocked on this mutex */
} NOP_mutex_t;
 
 
/* Wait status for this library */
#define NOP_WAIT LIB_STATUS_BASE
 
/* NOP accepts no resource models: every registration attempt is
   simply refused. */
static int NOP_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  /* nothing to register for this protocol */
  return -1;
}
 
/* Task detach hook: NOP keeps no per-task state, so there is nothing
   to release here. */
static void NOP_res_detach(RLEVEL l, PID p)
{
}
 
/* Initialize a NOP mutex: allocate the protocol-private descriptor
   and link it into the generic mutex structure.
   Returns -1 on a wrong attribute class, ENOMEM if the allocation
   fails, 0 on success. */
static int NOP_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  NOP_mutex_t *mp;

  /* refuse attributes that do not belong to this protocol */
  if (a->mclass != NOP_MCLASS)
    return -1;

  mp = (NOP_mutex_t *) kern_alloc(sizeof(NOP_mutex_t));

  /* out of memory?  (note: as in the original code, there is no
     check against re-initializing a mutex that was never destroyed) */
  if (!mp)
    return (ENOMEM);

  /* no owner yet, empty wait queue */
  mp->owner = NIL;
  iq_init(&mp->blocked, &freedesc, 0);

  m->mutexlevel = l;
  m->opt = (void *)mp;

  return 0;
}
 
 
/* Destroy a NOP mutex.
   Returns EBUSY if the mutex is still owned, 0 otherwise.
   BUGFIX: the original code dereferenced m->opt to read the owner
   BEFORE checking m->opt for NULL, crashing on a mutex that was never
   initialized or was already destroyed.  Check the pointer first. */
static int NOP_destroy(RLEVEL l, mutex_t *m)
{
  SYS_FLAGS f;

  /* nothing was ever allocated: nothing to do */
  if (!m->opt)
    return 0;

  /* refuse to destroy a locked mutex */
  if ( ((NOP_mutex_t *)m->opt)->owner != NIL)
    return (EBUSY);

  f = kern_fsave();
  kern_free(m->opt,sizeof(NOP_mutex_t));
  m->opt = NULL;
  kern_frestore(f);

  return 0;
}
 
/* Lock a NOP mutex.
   If the mutex is free it is taken immediately; if it is owned by
   another task, the caller is blocked on the mutex queue and the
   scheduler is invoked.  Returns EDEADLK if the caller already owns
   the mutex, ENOMEM if a lazy initialization fails, 0 on success. */
static int NOP_lock(RLEVEL l, mutex_t *m)
{
  NOP_mutex_t *p;
  SYS_FLAGS f;

  f = kern_fsave();

  p = (NOP_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    NOP_mutexattr_t a;
    NOP_mutexattr_default(a);
    NOP_init(l, m, &a);
    /* BUGFIX: reload the descriptor just created by NOP_init; the
       original code kept using the stale NULL pointer and
       dereferenced it below. */
    p = (NOP_mutex_t *)m->opt;
    if (!p) {
      /* the allocation inside NOP_init failed */
      kern_frestore(f);
      return (ENOMEM);
    }
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_frestore(f);
    return (EDEADLK);
  }

  if (p->owner != NIL) { /* We must block exec task */
    LEVEL l; /* for readableness only */
    proc_table[exec_shadow].context = kern_context_save();
    kern_epilogue_macro();
    l = proc_table[exec_shadow].task_level;
    level_table[l]->public_block(l,exec_shadow);

    /* we insert the task in the semaphore queue */
    proc_table[exec_shadow].status = NOP_WAIT;
    iq_insertlast(exec_shadow,&p->blocked);

    /* and finally we reschedule */
    exec = exec_shadow = -1;
    scheduler();
    kern_context_load(proc_table[exec_shadow].context);
  }
  else {
    /* the mutex is free, We can lock it! */
    p->owner = exec_shadow;
    kern_frestore(f);
  }

  return 0;
}
 
/* Try to lock a NOP mutex without blocking.
   Returns EBUSY if the mutex is owned, ENOMEM if a lazy
   initialization fails, 0 on success. */
static int NOP_trylock(RLEVEL l, mutex_t *m)
{
  NOP_mutex_t *p;
  SYS_FLAGS f;

  f = kern_fsave();

  p = (NOP_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    NOP_mutexattr_t a;
    NOP_mutexattr_default(a);
    NOP_init(l, m, &a);
    /* BUGFIX: reload the descriptor just created by NOP_init; the
       original code kept using the stale NULL pointer and
       dereferenced it below. */
    p = (NOP_mutex_t *)m->opt;
    if (!p) {
      /* the allocation inside NOP_init failed */
      kern_frestore(f);
      return (ENOMEM);
    }
  }

  if (p->owner != NIL) {
    /* a task already owns the mutex */
    kern_frestore(f);
    return (EBUSY);
  }

  /* the mutex is free, We can lock it! */
  p->owner = exec_shadow;
  kern_frestore(f);

  return 0;
}
 
/* Release a NOP mutex owned by the running task.
   Returns EINVAL if the mutex was never initialized, EPERM if the
   caller is not the owner, 0 on success.  If some task is blocked on
   the mutex, ownership is handed directly to the first queued task,
   which is then unblocked, and a reschedule is performed.
   NOTE(review): unlike NOP_lock/NOP_trylock this path never calls
   kern_fsave(), yet the error branch below calls kern_sti() -- verify
   this against the kernel's interrupt-handling conventions. */
static int NOP_unlock(RLEVEL l, mutex_t *m)
{
  NOP_mutex_t *p;

  p = (NOP_mutex_t *)m->opt;
  if (!p)
    return (EINVAL);

  if (p->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    kern_sti();
    return (EPERM);
  }

  proc_table[exec_shadow].context = kern_context_save();

  /* the mutex is mine; pop the first blocked task (NIL if none):
     it becomes the new owner */
  p->owner = iq_getfirst(&p->blocked);
  if (p->owner != NIL) {
    /* wake the new owner at its own scheduling level
       (the RLEVEL parameter l is reused here as a LEVEL index) */
    l = proc_table[p->owner].task_level;
    level_table[l]->public_unblock(l,p->owner);
  }

  scheduler();
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}
 
/* Register the NOP module with the kernel and return the resource
   level index it was assigned. */
RLEVEL NOP_register_module(void)
{
  RLEVEL lev;                  /* the level that we register */
  NOP_mutex_resource_des *des; /* for readableness only */

  printk("NOP_register_module\n");

  /* reserve a slot in the resource table */
  lev = resource_alloc_descriptor();

  /* allocate the level descriptor and install it */
  des = (NOP_mutex_resource_des *)kern_alloc(sizeof(NOP_mutex_resource_des));
  resource_table[lev] = (resource_des *)des;

  /* generic resource interface */
  des->m.r.rtype        = MUTEX_RTYPE;
  des->m.r.res_register = NOP_res_register;
  des->m.r.res_detach   = NOP_res_detach;

  /* mutex interface */
  des->m.init    = NOP_init;
  des->m.destroy = NOP_destroy;
  des->m.lock    = NOP_lock;
  des->m.trylock = NOP_trylock;
  des->m.unlock  = NOP_unlock;

  return lev;
}
 
/shark/trunk/modules/nop/subdir.mk
0,0 → 1,0
OBJS += nop/nop.o
/shark/trunk/modules/npp/npp/npp.h
0,0 → 1,98
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: npp.h,v 1.1 2005-02-25 10:45:36 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:45:36 $
------------
 
This file contains the Non Preemptive Protocol (NPP)
 
Title:
NPP (Non Preemptive Protocol)
 
Resource Models Accepted:
None
 
Description:
This module implements the Non Preemptive Protocol.
When a task acquires a critical section, it becomes non-preemptable.
 
A NPP mutex is created passing the NPP_mutexattr structure to mutex_init.
 
Exceptions raised:
XMUTEX_OWNER_KILLED
This exception is raised when a task ends and it owns one or more
mutexes
 
Restrictions & special features:
- This module is NOT Posix compliant
- This module can manage any number of NPP mutexes.
- if a task uses a NPP mutex, it can use only this type of mutex.
- If a task ends (because it reaches the end of the body or because it
is killed by someone) and it owns some mutex, an exception is raised.
- if a mutex unlock is called on a mutex not previously
locked or previously locked by another task an error is returned
- A NPP mutex can be statically allocated. To do this, the init function
have to define a macro that puts this information in the mutex
descriptor: mutexlevel = <NPP resource level>; opt = NULL;
for example, if the NPP module is registered at level 1, the macro is
like:
#define MUTEX_INITIALIZER {1,(void *)NULL}
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
 
#ifndef __NPP_H__
#define __NPP_H__
 
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
void NPP_register_module(void);
 
__END_DECLS
#endif
/shark/trunk/modules/npp/subdir.mk
0,0 → 1,0
OBJS += npp/npp.o
/shark/trunk/modules/npp/npp.c
0,0 → 1,196
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: npp.c,v 1.1 2005-02-25 10:45:36 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:45:36 $
------------
 
Non Preemptive Protocol. see npp.h for more details...
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <npp/npp/npp.h>
 
#include <ll/ll.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/* The NPP resource level descriptor */
typedef struct {
  mutex_resource_des m; /*+ the mutex interface +*/

  int nlocked; /*+ how many mutexes are currently locked at this level;
                   while it is >0 the running task is non-preemptive +*/
} NPP_mutex_resource_des;
 
 
#if 0
/*+ print resource protocol statistics... (debug helper, currently
    compiled out) +*/
static void NPP_resource_status(RLEVEL r)
{
  NPP_mutex_resource_des *m = (NPP_mutex_resource_des *)(resource_table[r]);

  kern_printf("%d Resources owned by the tasks %d\n", m->nlocked, exec_shadow);
}
#endif
 
/* NPP accepts no resource models: registration always fails, so every
   task can use the protocol without declaring anything. */
static int NPP_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  return -1;
}
 
/* Task detach hook: a task must not die while some NPP mutex at this
   level is still locked; in that case raise XMUTEX_OWNER_KILLED
   (see npp.h). */
static void NPP_res_detach(RLEVEL l, PID p)
{
  NPP_mutex_resource_des *des = (NPP_mutex_resource_des *)(resource_table[l]);

  if (des->nlocked)
    kern_raise(XMUTEX_OWNER_KILLED, p);
}
 
/* Initialize an NPP mutex: no dynamic state is needed, the opt field
   just records the owner PID (NIL = free).
   Returns -1 on a wrong attribute class, 0 on success. */
static int NPP_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  if (a->mclass == NPP_MCLASS) {
    m->mutexlevel = l;
    m->opt = (void *)NIL;
    return 0;
  }

  /* the attributes do not belong to this protocol */
  return -1;
}
 
 
/* Destroy an NPP mutex: fails with EBUSY while somebody owns it;
   nothing was allocated at init time, so nothing must be freed. */
static int NPP_destroy(RLEVEL l, mutex_t *m)
{
  if ( ((PID) m->opt) == NIL)
    return 0;

  return (EBUSY);
}
 
/* Lock an NPP mutex.
   Returns EDEADLK if the caller already owns the mutex, 0 on success.
   Locking never blocks under NPP: the first lock makes the running
   task non-preemptive, so no other task can be holding a mutex of
   this level when we get here. */
static int NPP_lock(RLEVEL l, mutex_t *m)
{
  NPP_mutex_resource_des *lev;
  SYS_FLAGS f;

  f = kern_fsave();

  if (((PID)m->opt) == exec_shadow) {
    /* the task already owns the mutex */
    kern_frestore(f);
    return (EDEADLK);
  }

  /* m->opt == NIL here (it can't be != NIL and != exec_shadow,
     because when a task locks a mutex it becomes non-preemptable) */

  /* the mutex is free, We can lock it! */
  lev = (NPP_mutex_resource_des *)(resource_table[l]);

  /* first mutex taken at this level: switch preemption off */
  if (!lev->nlocked) task_nopreempt();
  lev->nlocked++;

  m->opt = (void *)exec_shadow;

  kern_frestore(f);

  return 0;
}
 
// static int NPP_trylock(RLEVEL l, mutex_t *m) is a non-sense!
 
/* Unlock an NPP mutex.
   Per the module contract (see npp.h: "if a mutex unlock is called on
   a mutex not previously locked or previously locked by another task
   an error is returned"), a wrongful unlock must fail.
   BUGFIX: the original code skipped that check, so unlocking a free
   mutex (or somebody else's) decremented nlocked anyway and corrupted
   the preemption accounting.  Returns EPERM on a wrongful unlock,
   0 on success. */
static int NPP_unlock(RLEVEL l, mutex_t *m)
{
  NPP_mutex_resource_des *lev;

  /* reject an unlock from a task that does not own the mutex
     (this also covers an unlock on a free mutex, opt == NIL) */
  if (((PID)m->opt) != exec_shadow)
    return (EPERM);

  /* the mutex is mine */
  lev = (NPP_mutex_resource_des *)(resource_table[l]);
  lev->nlocked--;

  m->opt = (void *)NIL;

  /* leaving the last critical section: become preemptable again */
  if (!lev->nlocked) task_preempt();

  return 0;
}
 
/* Register the NPP module with the kernel. */
void NPP_register_module(void)
{
  RLEVEL lev;                  /* the level that we register */
  NPP_mutex_resource_des *des; /* for readableness only */

  printk("NPP_register_module\n");

  /* reserve a slot in the resource table */
  lev = resource_alloc_descriptor();

  /* allocate the level descriptor and install it */
  des = (NPP_mutex_resource_des *)kern_alloc(sizeof(NPP_mutex_resource_des));
  resource_table[lev] = (resource_des *)des;

  /* generic resource interface */
  des->m.r.rtype        = MUTEX_RTYPE;
  des->m.r.res_register = NPP_res_register;
  des->m.r.res_detach   = NPP_res_detach;

  /* mutex interface: under NPP a trylock can never find the mutex
     busy (the owner runs non-preemptively), so trylock maps to the
     plain lock function */
  des->m.init    = NPP_init;
  des->m.destroy = NPP_destroy;
  des->m.lock    = NPP_lock;
  des->m.trylock = NPP_lock;
  des->m.unlock  = NPP_unlock;

  /* no mutex locked yet at this level */
  des->nlocked = 0;
}
 
/shark/trunk/modules/edf/edf.c
0,0 → 1,771
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors:
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* Anton Cervin
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: edf.c,v 1.1 2005-02-25 10:53:41 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:53:41 $
------------
 
This file contains the scheduling module EDF (Earliest Deadline First)
 
Read edf.h for further details.
 
**/
 
/*
* Copyright (C) 2000,2002 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <edf/edf/edf.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <tracer.h>
 
//#define EDF_DEBUG
#define edf_printf kern_printf
#ifdef EDF_DEBUG
/* Debug-only helpers formatting times as "sec.usec" strings.
   Three separate functions with distinct static buffers are kept on
   purpose, so that several of them can appear in the same printf
   argument list without overwriting each other's result. */
char *pnow() {
  static char buf[40];
  struct timespec t;
  sys_gettime(&t);
  sprintf(buf, "%ld.%06ld", t.tv_sec, t.tv_nsec/1000);
  return buf;
}
/* format an arbitrary timespec (buffer #1) */
char *ptime1(struct timespec *t) {
  static char buf[40];
  sprintf(buf, "%ld.%06ld", t->tv_sec, t->tv_nsec/1000);
  return buf;
}
/* format an arbitrary timespec (buffer #2) */
char *ptime2(struct timespec *t) {
  static char buf[40];
  sprintf(buf, "%ld.%06ld", t->tv_sec, t->tv_nsec/1000);
  return buf;
}
#endif
 
/* Statuses used in the level */
#define EDF_READY MODULE_STATUS_BASE /* ready */
#define EDF_IDLE MODULE_STATUS_BASE+1 /* idle, waiting for offset/eop */
#define EDF_WAIT MODULE_STATUS_BASE+2 /* to sleep, waiting for eop */
#define EDF_ZOMBIE MODULE_STATUS_BASE+3 /* to free, waiting for eop */
 
/* Task flags */
#define EDF_FLAG_SPORADIC 1 /* the task is sporadic */
#define EDF_FLAG_SPOR_LATE 2 /* sporadic task with period overrun */
 
 
/* Per-task EDF descriptor (one per PID, stored in the level) */
typedef struct {
  int flags; /* task flags (EDF_FLAG_*) */
  TIME period; /* period (or inter-arrival interval) */
  TIME rdeadline; /* relative deadline */
  TIME offset; /* release offset */
  struct timespec release; /* release time of current instance */
  struct timespec adeadline; /* latest assigned deadline */
  int dl_timer; /* deadline timer (-1 = not pending) */
  int eop_timer; /* end of period timer (-1 = not pending) */
  int off_timer; /* timer offset (-1 = not pending) */
  int dl_miss; /* deadline miss counter */
  int wcet_miss; /* WCET miss counter */
  int act_miss; /* activation miss counter */
  int nact; /* number of pending periodic jobs */
} EDF_task_des;
 
 
/* Level descriptor (the level_des member is first so the generic
   level_table entry can be cast to EDF_level_des, as done throughout
   this file) */
typedef struct {
  level_des l; /* standard level descriptor */
  int flags; /* level flags (EDF_ENABLE_*) */
  IQUEUE ready; /* the ready queue, ordered by absolute deadline */
  bandwidth_t U; /* used bandwidth */
  EDF_task_des tvec[MAX_PROC]; /* vector of task descriptors, indexed by PID */
} EDF_level_des;
 
 
/* Module function cross-references */
static void EDF_intern_release(PID p, EDF_level_des *lev);
 
 
/**** Timer event handler functions ****/
 
/* This timer event handler is called at the end of the period.
   Depending on the task state it either:
   - frees a ZOMBIE task (the task already ended; its bandwidth can
     now be reclaimed),
   - completes the transition of an on-time sleeping task
     (EDF_WAIT -> SLEEP),
   - marks a still-busy sporadic task as late, or
   - releases/queues the next instance of a periodic task. */
static void EDF_timer_endperiod(void *par)
{
  PID p = (PID) par;
  EDF_level_des *lev = (EDF_level_des *)level_table[proc_table[p].task_level];
  EDF_task_des *td = &lev->tvec[p];
  td->eop_timer = -1; /* the timer has fired: forget its handle */

  if (proc_table[p].status == EDF_ZOMBIE) {
    /* put the task in the FREE state */
    proc_table[p].status = FREE;
    iq_insertfirst(p,&freedesc);
    /* free the allocated bandwidth */
    lev->U -= (MAX_BANDWIDTH/td->rdeadline) * proc_table[p].wcet;
    return;
  }

  if (proc_table[p].status == EDF_WAIT) {
    proc_table[p].status = SLEEP;
    return;
  }
  if (td->flags & EDF_FLAG_SPORADIC) {
    /* the task is sporadic and still busy, mark it as late */
    td->flags |= EDF_FLAG_SPOR_LATE;
  } else {
    /* the task is periodic, release/queue another instance */
    EDF_intern_release(p, lev);
  }
}
 
/* Deadline-miss timer handler: fired when a task reaches its absolute
   deadline while still active.  Depending on the level flags this
   either raises XDEADLINE_MISS or just records the miss. */
static void EDF_timer_deadline(void *par)
{
  PID p = (PID) par;
  EDF_level_des *lev = (EDF_level_des *)level_table[proc_table[p].task_level];
  EDF_task_des *td = &lev->tvec[p];

  /* the timer has fired: forget its handle */
  td->dl_timer = -1;

  TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,
                  (unsigned short int)proc_table[p].context,0);

  if (!(lev->flags & EDF_ENABLE_DL_EXCEPTION))
    td->dl_miss++;
  else
    kern_raise(XDEADLINE_MISS,p);
}
 
/* Offset timer handler: the release offset has elapsed, so the task
   can be released now. */
static void EDF_timer_offset(void *par)
{
  PID p = (PID) par;
  EDF_level_des *lev = (EDF_level_des *)level_table[proc_table[p].task_level];
  EDF_task_des *td = &lev->tvec[p];

  /* the timer has fired: forget its handle and release the task */
  td->off_timer = -1;
  EDF_intern_release(p, lev);
}
 
/* Guest-task deadline handler: a guest that misses its deadline
   always triggers an exception (there is no miss-counting mode for
   guest tasks). */
static void EDF_timer_guest_deadline(void *par)
{
  PID p = (PID) par;
  EDF_level_des *lev = (EDF_level_des *)level_table[proc_table[p].task_level];
  EDF_task_des *td = &lev->tvec[p];

  td->dl_timer = -1;

  TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,
                  (unsigned short int)proc_table[p].context,0);
  kern_raise(XDEADLINE_MISS,p);
}
 
 
/**** Internal utility functions ****/
 
/* Release (or queue) a task, post deadline and endperiod timers.
   Expects td->release to hold the release time of this instance and
   td->adeadline the deadline to assign to it. */
static void EDF_intern_release(PID p, EDF_level_des *lev)
{
  struct timespec temp;
  EDF_task_des *td = &lev->tvec[p];

  /* post deadline timer (cancelling a stale one, if any) */
  if (lev->flags & EDF_ENABLE_DL_CHECK) {
    temp = td->release;
    ADDUSEC2TIMESPEC(td->rdeadline, &temp);
    if (td->dl_timer != -1) {
      kern_event_delete(td->dl_timer);
      td->dl_timer = -1;
    }
    td->dl_timer = kern_event_post(&temp,EDF_timer_deadline,(void *)p);
  }

  /* release or queue next job */
  if (proc_table[p].status == EDF_IDLE) {
    /* assign deadline, insert task in the ready queue */
    proc_table[p].status = EDF_READY;
    *iq_query_timespec(p,&lev->ready) = td->adeadline;
    iq_timespec_insert(p,&lev->ready);
#ifdef EDF_DEBUG
    edf_printf("At %s: releasing %s with deadline %s\n", pnow(),
               proc_table[p].name, ptime1(&td->adeadline));
#endif
    /* increase assigned deadline */
    ADDUSEC2TIMESPEC(td->period, &td->adeadline);
    /* reschedule */
    event_need_reschedule();
  } else {
    /* the task is still busy: remember the activation for later */
    td->nact++;
  }

  /* increase release time */
  ADDUSEC2TIMESPEC(td->period, &td->release);
  /* post end of period timer (cancelling a stale one, if any) */
  if (td->eop_timer != -1) {
    kern_event_delete(td->eop_timer);
    td->eop_timer = -1;
  }

  td->eop_timer = kern_event_post(&td->release, EDF_timer_endperiod,(void *)p);

  TRACER_LOGEVENT(FTrace_EVT_task_timer,
                  (unsigned short int)proc_table[p].context,
                  (unsigned int)proc_table[p].task_level);
}
 
 
/**** Public generic kernel interface functions ****/
 
/* EDF scheduling decision: the ready queue is deadline-ordered, so
   the task with the earliest deadline is simply its head (NIL if the
   queue is empty). */
static PID EDF_public_scheduler(LEVEL l)
{
  return iq_query_first(&((EDF_level_des *)level_table[l])->ready);
}
 
/* On-line guarantee: succeeds (returning 1 and consuming bandwidth)
   iff the level's utilization fits in the remaining system
   bandwidth; otherwise returns 0 and leaves it untouched. */
static int EDF_public_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  if (*freebandwidth < lev->U)
    return 0;

  *freebandwidth -= lev->U;
  return 1;
}
 
/* Called by task_create: checks the task model and creates a task.
   Returns -1 if the model is not acceptable or if there is not enough
   free bandwidth, 0 otherwise. */
static int EDF_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
  EDF_task_des *td = &lev->tvec[p];
  HARD_TASK_MODEL *h;

  if (m->pclass != HARD_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;
  h = (HARD_TASK_MODEL *)m;
  if (!h->wcet || !h->mit) return -1;
  if (h->drel > h->mit) return -1; /* only D <= T supported */

  /* a relative deadline of 0 means "deadline == period" */
  if (!h->drel) {
    td->rdeadline = h->mit;
  } else {
    td->rdeadline = h->drel;
  }

  td->period = h->mit;
  if (td->rdeadline == td->period) {
    /* Ensure that D <= T-eps to make dl_timer trigger before eop_timer */
    td->rdeadline = td->period - 1;
  }

  /* check the free bandwidth...
     BUGFIX: this is done AFTER the D==T adjustment above, so the
     utilization added here uses the same rdeadline divisor that
     EDF_public_detach and the end-of-period ZOMBIE path later use to
     subtract it; with the original order lev->U drifted downward on
     every create/detach cycle of a D==T task. */
  if (lev->flags & EDF_ENABLE_GUARANTEE) {
    bandwidth_t b;
    b = (MAX_BANDWIDTH / td->rdeadline) * h->wcet;

    /* really update lev->U, checking an overflow... */
    if (MAX_BANDWIDTH - lev->U > b) {
      lev->U += b;
    } else {
      return -1;
    }
  }

  td->flags = 0;
  if (h->periodicity == APERIODIC) {
    td->flags |= EDF_FLAG_SPORADIC;
  }
  td->offset = h->offset;
  td->dl_timer = -1;
  td->eop_timer = -1;
  td->off_timer = -1;
  td->dl_miss = 0;
  td->wcet_miss = 0;
  td->act_miss = 0;
  td->nact = 0;

  /* Enable wcet check */
  if (lev->flags & EDF_ENABLE_WCET_CHECK) {
    proc_table[p].avail_time = h->wcet;
    proc_table[p].wcet = h->wcet;
    proc_table[p].control |= CONTROL_CAP; /* turn on measurement */
  }

  return 0; /* OK, also if the task cannot be guaranteed... */
}
 
/* Reclaim the bandwidth used by the task.
   NOTE(review): the amount subtracted here must match what
   EDF_public_create added (same rdeadline divisor, same wcet) or
   lev->U will drift over create/detach cycles -- verify. */
static void EDF_public_detach(LEVEL l, PID p)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
  EDF_task_des *td = &lev->tvec[p];

  if (lev->flags & EDF_ENABLE_GUARANTEE) {
    lev->U -= (MAX_BANDWIDTH / td->rdeadline) * proc_table[p].wcet;
  }
}
 
/* A task has been chosen to run: take it out of the ready queue
   (its status is managed by the generic kernel). */
static void EDF_public_dispatch(LEVEL l, PID p, int nostop)
{
  iq_extract(p, &((EDF_level_des *)level_table[l])->ready);
}
 
/* Called when the task is preempted or when its budget is exhausted:
   account a possible WCET overrun, then put the task back into the
   ready queue (its assigned deadline is still valid). */
static void EDF_public_epilogue(LEVEL l, PID p)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
  EDF_task_des *td = &lev->tvec[p];

  /* budget exhausted? */
  if ((lev->flags & EDF_ENABLE_WCET_CHECK) &&
      proc_table[p].avail_time <= 0) {
    TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,
                    (unsigned short int)proc_table[p].context,0);
    if (lev->flags & EDF_ENABLE_WCET_EXCEPTION) {
      kern_raise(XWCET_VIOLATION,p);
    } else {
      /* soft handling: stop the capacity accounting, count the miss */
      proc_table[p].control &= ~CONTROL_CAP;
      td->wcet_miss++;
    }
  }

  /* back to the ready queue with the same deadline */
  iq_timespec_insert(p,&lev->ready);
  proc_table[p].status = EDF_READY;
}
 
/* Called by task_activate or group_activate: activates the task at
   time t.  The release time becomes t + offset; if that lies in the
   future an offset timer is posted, otherwise the task is released
   immediately.  An activation of a task that is not SLEEPing is
   either an error (exception) or just counted, depending on flags. */
static void EDF_public_activate(LEVEL l, PID p, struct timespec *t)
{
  struct timespec clocktime;
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
  EDF_task_des *td = &lev->tvec[p];

  kern_gettime(&clocktime);

  /* check if we are not in the SLEEP state */
  if (proc_table[p].status != SLEEP) {
    if (lev->flags & EDF_ENABLE_ACT_EXCEPTION) {
      /* too frequent or wrongful activation: raise exception */
      kern_raise(XACTIVATION,p);
    } else {
      /* skip the sporadic job, but increase a counter */
#ifdef EDF_DEBUG
      edf_printf("At %s: activation of %s skipped\n", pnow(),
                 proc_table[p].name);
#endif
      td->act_miss++;
    }
    return;
  }
  /* set the release time to the activation time + offset */
  td->release = *t;
  ADDUSEC2TIMESPEC(td->offset, &td->release);

  /* set the absolute deadline to the activation time + offset + rdeadline */
  td->adeadline = td->release;
  ADDUSEC2TIMESPEC(td->rdeadline, &td->adeadline);

  /* Check if release > clocktime. If yes, release it later,
     otherwise release it now. */

  proc_table[p].status = EDF_IDLE;

  if (TIMESPEC_A_GT_B(&td->release, &clocktime)) {
    /* release later, post an offset timer (replacing a stale one) */
    if (td->off_timer != -1) {
      kern_event_delete(td->off_timer);
      td->off_timer = -1;
    }
    td->off_timer = kern_event_post(&td->release,EDF_timer_offset,(void *)p);
  } else {
    /* release now */
    EDF_intern_release(p, lev);
  }
}
 
/* Reinsert a previously blocked task into the ready queue, keeping
   the deadline it already had. */
static void EDF_public_unblock(LEVEL l, PID p)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  proc_table[p].status = EDF_READY;
  iq_timespec_insert(p,&lev->ready);
}
 
/* Called when a task experiences a synchronization block.
   Nothing is needed here: the task was already extracted from the
   ready queue at dispatch time, capacity events are removed by the
   generic kernel, the wcet needs no change, the status is set by the
   caller, and the deadline must stay as it is. */
static void EDF_public_block(LEVEL l, PID p)
{
}
 
/* Called by task_endcycle (m == 0) or task_sleep (m == 1): ends the
   current instance of the task.
   task_endcycle: if no activation is pending the task goes IDLE
   (periodic), EDF_WAIT (sporadic on time) or SLEEP (sporadic late);
   if activations are queued the next job is released immediately.
   task_sleep: like endcycle, but pending periodic activations are
   discarded and the task goes to SLEEP.
   Always re-enables capacity accounting, updates the JET data and
   refills the budget.  Returns 0. */
static int EDF_public_message(LEVEL l, PID p, void *m)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
  EDF_task_des *td = &lev->tvec[p];

  switch((long)(m)) {
    /* task_endcycle() */
  case 0:
    /* if there are no pending jobs */
    if (td->nact == 0) {
      /* remove deadline timer, if any */
      if (td->dl_timer != -1) {
        kern_event_delete(td->dl_timer);
        td->dl_timer = -1;
      }
      if (td->flags & EDF_FLAG_SPORADIC) {
        /* sporadic task */
        if (!(td->flags & EDF_FLAG_SPOR_LATE)) {
          proc_table[p].status = EDF_WAIT;
        } else {
          /* it's late, move it directly to SLEEP */
          proc_table[p].status = SLEEP;
          td->flags &= ~EDF_FLAG_SPOR_LATE;
        }
      } else {
        /* periodic task */
        proc_table[p].status = EDF_IDLE;
      }
    } else {
      /* we are late / there are pending jobs */
      td->nact--;
      /* compute and assign absolute deadline */
      *iq_query_timespec(p,&lev->ready) = td->adeadline;
      iq_timespec_insert(p,&lev->ready);
      /* increase assigned deadline */
      ADDUSEC2TIMESPEC(td->period, &td->adeadline);
#ifdef EDF_DEBUG
      edf_printf("(Late) At %s: releasing %s with deadline %s\n",
                 pnow(),proc_table[p].name,ptime1(&td->adeadline));
#endif
    }
    break;
    /* task_sleep() */
  case 1:
    /* remove deadline timer, if any */
    if (td->dl_timer != -1) {
      kern_event_delete(td->dl_timer);
      td->dl_timer = -1;
    }
    if (td->flags & EDF_FLAG_SPORADIC) {
      /* sporadic task */
      if (!(td->flags & EDF_FLAG_SPOR_LATE)) {
        proc_table[p].status = EDF_WAIT;
      } else {
        /* it's late, move it directly to SLEEP */
        proc_table[p].status = SLEEP;
        td->flags &= ~EDF_FLAG_SPOR_LATE;
      }
    } else {
      /* periodic task */
      if (!(td->nact > 0)) {
        /* we are on time. go to the EDF_WAIT state */
        proc_table[p].status = EDF_WAIT;
      } else {
        /* we are late. delete pending activations and go to SLEEP */
        td->nact = 0;
        proc_table[p].status = SLEEP;
        /* remove end of period timer */
        if (td->eop_timer != -1) {
          kern_event_delete(td->eop_timer);
          td->eop_timer = -1;
        }
      }
    }
    break;
  }

  if (lev->flags & EDF_ENABLE_WCET_CHECK) {
    proc_table[p].control |= CONTROL_CAP;
  }
  jet_update_endcycle(); /* Update the Jet data... */
  proc_table[p].avail_time = proc_table[p].wcet;
  TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,
                  (unsigned short int)proc_table[p].context,(unsigned int)l);
  return 0;

}
 
/* End the task and free the resources at the end of the period.
   Normally the task is parked as a ZOMBIE and freed by the endperiod
   timer (which also reclaims its bandwidth); a late sporadic task has
   no pending endperiod timer, so it is freed immediately. */
static void EDF_public_end(LEVEL l, PID p)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
  EDF_task_des *td = &lev->tvec[p];

  if (!(td->flags & EDF_FLAG_SPOR_LATE)) {
    /* remove the deadline timer (if any) */
    if (td->dl_timer != -1) {
      kern_event_delete(td->dl_timer);
      td->dl_timer = -1;
    }
    proc_table[p].status = EDF_ZOMBIE;
  } else {
    /* no endperiod timer will be fired, free the task now! */
    proc_table[p].status = FREE;
    iq_insertfirst(p,&freedesc);
    /* free the allocated bandwidth */
    lev->U -= (MAX_BANDWIDTH/td->rdeadline) * proc_table[p].wcet;
  }
}
 
/**** Private generic kernel interface functions (guest calls) ****/
 
/* Insert a guest task: queue it by the deadline supplied in the
   JOB_TASK_MODEL and, unless noraiseexc is set, post a deadline timer
   that raises an exception on a miss.  Raises XINVALID_TASK if the
   model is not a JOB_TASK_MODEL for this level. */
static void EDF_private_insert(LEVEL l, PID p, TASK_MODEL *m)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
  EDF_task_des *td = &lev->tvec[p];
  JOB_TASK_MODEL *job;

  if (m->pclass != JOB_PCLASS || (m->level != 0 && m->level != l) ) {
    kern_raise(XINVALID_TASK, p);
    return;
  }

  job = (JOB_TASK_MODEL *)m;

  /* Insert task in the correct position */
  *iq_query_timespec(p, &lev->ready) = job->deadline;
  iq_timespec_insert(p,&lev->ready);
  proc_table[p].status = EDF_READY;
  /* cancel a stale deadline timer, if any */
  if (td->dl_timer != -1) {
    kern_event_delete(td->dl_timer);
    td->dl_timer = -1;
  }

  td->period = job->period;

  if (!job->noraiseexc) {
    td->dl_timer = kern_event_post(iq_query_timespec(p, &lev->ready),
                                   EDF_timer_guest_deadline,(void *)p);
  }
}
 
/* Dispatch a guest task: remove it from the ready queue (the EXE
   status is set by scheduler()).  Note that p is not necessarily the
   head of the queue. */
static void EDF_private_dispatch(LEVEL l, PID p, int nostop)
{
  iq_extract(p, &((EDF_level_des *)level_table[l])->ready);
}
 
/* A guest task was preempted or ran out of budget: put it back into
   the ready queue with its current deadline. */
static void EDF_private_epilogue(LEVEL l, PID p)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  proc_table[p].status = EDF_READY;
  iq_timespec_insert(p,&lev->ready);
}
 
/* Remove a guest task from the level: take it out of the ready queue
   if it is queued there and cancel its pending deadline timer (the
   guest's slice is over). */
static void EDF_private_extract(LEVEL l, PID p)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
  EDF_task_des *td = &lev->tvec[p];

  if (proc_table[p].status == EDF_READY)
    iq_extract(p, &lev->ready);

  /* the deadline timer must not fire any more */
  if (td->dl_timer != -1) {
    kern_event_delete(td->dl_timer);
    td->dl_timer = -1;
  }
}
 
 
/**** Level registration function ****/
 
LEVEL EDF_register_level(int flags)
{
LEVEL l; /* the level that we register */
EDF_level_des *lev; /* for readableness only */
int i;
 
printk("EDF_register_level\n");
 
/* request an entry in the level_table */
l = level_alloc_descriptor(sizeof(EDF_level_des));
 
lev = (EDF_level_des *)level_table[l];
 
/* fill the standard descriptor */
lev->l.private_insert = EDF_private_insert;
lev->l.private_extract = EDF_private_extract;
lev->l.private_dispatch = EDF_private_dispatch;
lev->l.private_epilogue = EDF_private_epilogue;
 
lev->l.public_scheduler = EDF_public_scheduler;
if (flags & EDF_ENABLE_GUARANTEE)
lev->l.public_guarantee = EDF_public_guarantee;
else
lev->l.public_guarantee = NULL;
 
lev->l.public_create = EDF_public_create;
lev->l.public_detach = EDF_public_detach;
lev->l.public_end = EDF_public_end;
lev->l.public_dispatch = EDF_public_dispatch;
lev->l.public_epilogue = EDF_public_epilogue;
lev->l.public_activate = EDF_public_activate;
lev->l.public_unblock = EDF_public_unblock;
lev->l.public_block = EDF_public_block;
lev->l.public_message = EDF_public_message;
 
iq_init(&lev->ready, &freedesc, 0);
 
lev->flags = flags;
if (lev->flags & EDF_ENABLE_WCET_EXCEPTION) {
lev->flags |= EDF_ENABLE_WCET_CHECK;
}
if (lev->flags & EDF_ENABLE_DL_EXCEPTION) {
lev->flags |= EDF_ENABLE_DL_CHECK;
}
 
lev->U = 0;
 
for (i=0;i<MAX_PROC;i++) {
EDF_task_des *td = &lev->tvec[i];
td->flags = 0;
td->dl_timer = -1;
td->eop_timer = -1;
td->off_timer = -1;
td->dl_miss = 0;
td->wcet_miss = 0;
td->act_miss = 0;
td->nact = 0;
}
 
return l;
}
 
 
/**** Public utility functions ****/
 
/* Return the bandwidth currently allocated at this EDF level. */
bandwidth_t EDF_usedbandwidth(LEVEL l)
{
  return ((EDF_level_des *)(level_table[l]))->U;
}
 
/* Get the number of missed deadlines for a task */
int EDF_get_dl_miss(PID p)
{
  /* locate the EDF level that schedules p, then read its counter */
  EDF_level_des *des =
    (EDF_level_des *)level_table[proc_table[p].task_level];

  return des->tvec[p].dl_miss;
}
 
/* Get the number of execution overruns for a task */
int EDF_get_wcet_miss(PID p)
{
  /* locate the EDF level that schedules p, then read its counter */
  EDF_level_des *des =
    (EDF_level_des *)level_table[proc_table[p].task_level];

  return des->tvec[p].wcet_miss;
}
 
/* Get the number of skipped activations for a task */
int EDF_get_act_miss(PID p)
{
  /* locate the EDF level that schedules p, then read its counter */
  EDF_level_des *des =
    (EDF_level_des *)level_table[proc_table[p].task_level];

  return des->tvec[p].act_miss;
}
 
/* Get the current number of queued activations for a task */
int EDF_get_nact(PID p)
{
  /* locate the EDF level that schedules p, then read the count of
     currently queued (pending) activations */
  EDF_level_des *des =
    (EDF_level_des *)level_table[proc_table[p].task_level];

  return des->tvec[p].nact;
}
 
/shark/trunk/modules/edf/subdir.mk
0,0 → 1,0
OBJS += edf/edf.o
/shark/trunk/modules/edf/edf/edf.h
0,0 → 1,184
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* Anton Cervin
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: edf.h,v 1.1 2005-02-25 10:53:41 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:53:41 $
------------
 
This file contains the scheduling module EDF (Earliest Deadline First)
 
Title:
EDF (Earliest Deadline First)
 
Task Models Accepted:
HARD_TASK_MODEL - Hard Tasks (Periodic and Sporadic)
wcet field and mit field must be != 0. They are used to set the wcet
and period of the tasks.
periodicity field can be either PERIODIC or APERIODIC
drel field must be <= mit. NOTE 1: a drel of 0 is interpreted as mit.
NOTE 2: The utilization of the task is computed as wcet/drel.
offset field specifies a release offset relative to task_activate or
group_activate.
 
Guest Models Accepted:
JOB_TASK_MODEL - a single guest task activation
Identified by an absolute deadline and a period.
period field is ignored
 
Description:
This module schedules periodic and sporadic tasks based on their
absolute deadlines. The task guarantee is based on a simple
utilization approach. The utilization factor of a task is computed
as wcet/drel. (By default, drel = mit.) A periodic task must only
be activated once; subsequent activations are triggered by an
   internal timer. By contrast, a sporadic task must be explicitly
activated for each instance. NO GUARANTEE is performed on guest
tasks. The guarantee must be performed by the level that inserts
guest tasks in the EDF level.
 
Exceptions raised:
XUNVALID_GUEST
This level doesn't support guests of this type. When a guest
operation is called, the exception is raised.
 
The following exceptions may be raised by the module:
XDEADLINE_MISS
If a task misses its deadline and the EDF_ENABLE_DL_EXCEPTION
flag is set, this exception is raised.
 
XWCET_VIOLATION
If a task executes longer than its declared wcet and the
EDF_ENABLE_WCET_EXCEPTION flag is set, this exception is raised.
 
XACTIVATION
If a sporadic task is activated more often than its declared mit
and the EDF_ENABLE_ACT_EXCEPTION flag is set, this exception is
raised. This exception is also raised if a periodic task is
activated while not in the SLEEP state.
 
Restrictions & special features:
 
- Relative deadlines drel <= mit may be specified.
- An offset > 0 will delay the activation of the task by the same
amount of time. To synchronize a group of tasks, assign suitable
offsets and then use the group_activate function.
- This level doesn't manage the main task.
- The level uses the priority and timespec_priority fields.
- The guest tasks don't provide the guest_endcycle function.
- At init time, the user can specify the behavior in case of
deadline and wcet overruns. The following flags are available:
 
(No flags enabled) - Deadline and wcet overruns are ignored.
Pending periodic jobs are queued and are
eventually scheduled with correct deadlines
according to their original arrival times.
                              Sporadic tasks that arrive too often are
simply dropped.
EDF_ENABLE_DL_CHECK - When a deadline overrun occurs, the
dl_miss counter of the task is increased.
Same behavior for pending jobs as above.
EDF_ENABLE_WCET_CHECK - When a wcet overrun occurs, the
wcet_miss counter of the task is increased.
Same behavior for pending jobs as above.
EDF_ENABLE_DL_EXCEPTION - When a deadline overrun occurs, an
exception is raised.
EDF_ENABLE_WCET_EXCEPTION - When a wcet overrun occurs, an
exception is raised.
EDF_ENABLE_ACT_EXCEPTION When a periodic or sporadic task is activated
too often, an exception is raised.
 
- The functions EDF_get_dl_miss, EDF_get_wcet_miss, EDF_get_act_miss,
and EDF_get_nact can be used to find out the number of missed
deadlines, the number of wcet overruns, the number of skipped
activations, and the number of currently queued periodic activations.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#ifndef __EDF_H__
#define __EDF_H__
 
#include <ll/ll.h>
#include <kernel/config.h>
#include <sys/types.h>
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/* Level flags */
#define EDF_DISABLE_ALL 0
#define EDF_ENABLE_GUARANTEE 1 /* Task guarantee enabled */
#define EDF_ENABLE_WCET_CHECK 2 /* Wcet monitoring enabled */
#define EDF_ENABLE_DL_CHECK 4 /* Deadline monitoring enabled */
#define EDF_ENABLE_WCET_EXCEPTION 8 /* Wcet overrun exception enabled */
#define EDF_ENABLE_DL_EXCEPTION 16 /* Deadline overrun exception enabled */
#define EDF_ENABLE_ACT_EXCEPTION 32 /* Activation exception enabled */
#define EDF_ENABLE_ALL 63 /* All flags enabled */
 
/* Registration function */
LEVEL EDF_register_level(int flags);
 
 
/**** Public utility functions ****/
 
/* Get the bandwidth used by the level */
bandwidth_t EDF_usedbandwidth(LEVEL l);
 
/* Get the number of missed deadlines for a task */
int EDF_get_dl_miss(PID p);
 
/* Get the number of execution overruns for a task */
int EDF_get_wcet_miss(PID p);
 
/* Get the number of skipped activations for a task */
int EDF_get_act_miss(PID p);
 
/* Get the current number of queued activations for a task */
int EDF_get_nact(PID p);
 
 
__END_DECLS
#endif
/shark/trunk/modules/posix/posix/posix.h
0,0 → 1,154
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: posix.h,v 1.1 2005-02-25 10:46:36 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:46:36 $
------------
 
This file contains the scheduling module compatible with POSIX
specifications
 
Title:
POSIX version 1
 
Task Models Accepted:
NRT_TASK_MODEL - Non-Realtime Tasks
weight field is ignored
slice field is used to set the slice of a task, if it is !=0
policy field is ignored
inherit field is ignored
 
Description:
   This module schedules its tasks following the POSIX specifications...
 
A task can be scheduled in a Round Robin way or in a FIFO way.
The tasks have also a priority field.
 
The slices can be different one task from one another.
 
The module can SAVE or SKIP activations
 
Exceptions raised:
XUNVALID_GUEST
This level doesn't support guests. When a guest operation
is called, the exception is raised.
 
Restrictions & special features:
- if specified, it creates at init time a task,
called "Main", attached to the function __init__().
- There must be only one module in the system that creates a task
attached to the function __init__().
- The level tries to guarantee that a RR task uses a "full" timeslice
before going to the queue tail. "full" means that a task can execute
a maximum time of slice+sys_tick due to the approx. done by
   the Virtual Machine. If a task executes more time than the slice,
   the next time it executes less...
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#ifndef __POSIX_H__
#define __POSIX_H__
 
#include <ll/ll.h>
#include <kernel/config.h>
#include <sys/types.h>
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
extern TASK __init__(void *arg);
 
 
 
/*+ Const: +*/
#define POSIX_MINIMUM_SLICE 1000 /*+ Minimum Timeslice +*/
#define POSIX_MAXIMUM_SLICE 500000 /*+ Maximum Timeslice +*/
 
#define POSIX_MAIN_YES 1 /*+ The level creates the main +*/
#define POSIX_MAIN_NO 0 /*+ The level doesn't create the main +*/
 
/*+ Registration function:
TIME slice the slice for the Round Robin queue
int createmain 1 if the level creates the main task 0 otherwise
struct multiboot_info *mb used if createmain specified
 
returns the level number at which the module has been registered.
+*/
LEVEL POSIX_register_level(TIME slice,
int createmain,
struct multiboot_info *mb,
int prioritylevels);
 
/*+ this function forces the running task to go to his queue tail,
then calls the scheduler and changes the context
(it works only on the POSIX level) +*/
int POSIX_sched_yield(LEVEL l);
 
/* the following functions have to be called with interruptions DISABLED! */
 
/*+ this function returns the maximum level allowed for the POSIX level +*/
int POSIX_get_priority_max(LEVEL l);
 
/*+ this function returns the default timeslice for the POSIX level +*/
int POSIX_rr_get_interval(LEVEL l);
 
/*+ this function returns some parameters of a task;
policy must be NRT_RR_POLICY or NRT_FIFO_POLICY;
priority must be in the range [0..prioritylevels]
returns ENOSYS or ESRCH if there are problems +*/
int POSIX_getschedparam(LEVEL l, PID p, int *policy, int *priority);
 
/*+ this function sets parameters of a task +*/
int POSIX_setschedparam(LEVEL l, PID p, int policy, int priority);
 
__END_DECLS
#endif
 
/*
MANCANO
13.3.6 GETPRIORITYMin da mettere a 0
*/
/shark/trunk/modules/posix/posix.c
0,0 → 1,581
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: posix.c,v 1.1 2005-02-25 10:46:36 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:46:36 $
------------
 
This file contains the scheduling module compatible with POSIX
specifications
 
Read posix.h for further details.
 
RR tasks have the CONTROL_CAP bit set
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <posix/posix/posix.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
#include <modules/comm_message.h>
 
/*+ Status used in the level +*/
#define POSIX_READY MODULE_STATUS_BASE
 
/*+ Use for change level in POSIX +*/
#define POSIX_CHANGE_LEVEL 1
 
 
/*+ the level redefinition for the Round Robin level +*/
typedef struct {
level_des l; /*+ the standard level descriptor +*/
 
int nact[MAX_PROC]; /*+ number of pending activations +*/
int priority[MAX_PROC]; /*+ priority of each task +*/
 
IQUEUE *ready; /*+ the ready queue array +*/
 
int slice; /*+ the level's time slice +*/
 
struct multiboot_info *multiboot; /*+ used if the level have to insert
the main task +*/
int maxpriority; /*+ the priority are from 0 to maxpriority
(i.e 0 to 31) +*/
 
int yielding; /*+ equal to 1 when a sched_yield is called +*/
 
/* introduce for changing level in POSIX */
int flag[MAX_PROC];
 
int new_level[MAX_PROC];
int new_slice[MAX_PROC];
int new_control[MAX_PROC];
 
} POSIX_level_des;
 
/* This is not efficient but very fair :-)
The need of all this stuff is because if a task execute a long time
due to (shadow!) priority inheritance, then the task shall go to the
tail of the queue many times... */
static PID POSIX_public_scheduler(LEVEL l)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);
  int prio = lev->maxpriority;

  /* Scan the priority queues from the highest priority downwards.
     A RR task (CONTROL_CAP set) found at the head with an exhausted
     slice is recharged and moved to the tail of its queue, and the
     same priority is examined again; this keeps tasks fair even when
     shadow (priority-inheritance) execution burned their slice. */
  while (prio >= 0) {
    PID candidate = iq_query_first(&lev->ready[prio]);

    if (candidate == NIL) {
      /* nothing runnable at this priority: try the next one down */
      prio--;
      continue;
    }

    if ((proc_table[candidate].control & CONTROL_CAP) &&
        (proc_table[candidate].avail_time <= 0)) {
      /* slice exhausted: recharge and requeue at the tail, then retry */
      proc_table[candidate].avail_time += proc_table[candidate].wcet;
      iq_extract(candidate, &lev->ready[prio]);
      iq_insertlast(candidate, &lev->ready[prio]);
    }
    else
      return candidate;
  }

  return NIL;
}
 
/* Accepts only NRT task models targeted at this level (or at any level).
   Sets up priority, timeslice and pending-activation accounting for the
   new task p.  Returns 0 on success, -1 on model rejection. */
static int POSIX_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);
  NRT_TASK_MODEL *nrt;

  /* reject anything that is not an NRT model bound to this level */
  if (m->pclass != NRT_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;

  nrt = (NRT_TASK_MODEL *)m;

  /* the task state is set at SLEEP by the general task_create */

  /* I used the wcet field because using wcet can account if a task
     consume more than the timeslice... */

  if (nrt->inherit == NRT_INHERIT_SCHED &&
      proc_table[exec_shadow].task_level == l) {
    /* We inherit the scheduling properties if the scheduling level
       *is* the same: priority, residual slice, wcet, the CONTROL_CAP
       bit, and the SAVE/SKIP-arrivals mode of the creating task */
    lev->priority[p] = lev->priority[exec_shadow];
    proc_table[p].avail_time = proc_table[exec_shadow].avail_time;
    proc_table[p].wcet = proc_table[exec_shadow].wcet;

    proc_table[p].control = (proc_table[p].control & ~CONTROL_CAP) |
                            (proc_table[exec_shadow].control & CONTROL_CAP);
    /* -1 means "skip activations"; anything else starts at 0 pending */
    lev->nact[p] = (lev->nact[exec_shadow] == -1) ? -1 : 0;
  }
  else {
    lev->priority[p] = nrt->weight;
    if (nrt->slice) {
      /* per-task timeslice requested in the model */
      proc_table[p].avail_time = nrt->slice;
      proc_table[p].wcet = nrt->slice;
    }
    else {
      /* model slice == 0: fall back to the level default */
      proc_table[p].avail_time = lev->slice;
      proc_table[p].wcet = lev->slice;
    }
    /* RR tasks get the capacity-control bit; FIFO tasks do not */
    if (nrt->policy == NRT_RR_POLICY)
      proc_table[p].control |= CONTROL_CAP;
    if (nrt->arrivals == SAVE_ARRIVALS)
      lev->nact[p] = 0;
    else
      lev->nact[p] = -1;
  }

  /* no pending level-change request for a fresh task */
  lev->flag[p] = 0;

  return 0; /* OK */
}
 
static void POSIX_public_dispatch(LEVEL l, PID p, int nostop)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);
  int prio = lev->priority[p];

  /* The scheduler has already marked the task EXE; here we only pull
     it out of its priority queue.  Note that p is not necessarily the
     head of the queue (e.g. shadow dispatches). */
  iq_extract(p, &lev->ready[prio]);
}
 
/* Called when the task leaves the CPU: either migrates it to a new
   level (POSIX_CHANGE_LEVEL pending) or re-inserts it into the proper
   position of its ready queue. */
static void POSIX_public_epilogue(LEVEL l, PID p)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);

  /* Change task level: put the task to sleep on this level and ask the
     destination level to activate it via a STD_ACTIVATE_TASK message */
  if (lev->flag[p] & POSIX_CHANGE_LEVEL) {

    STD_command_message msg;

    proc_table[p].status = SLEEP;
    proc_table[p].task_level = lev->new_level[p];
    msg.command = STD_ACTIVATE_TASK;
    level_table[lev->new_level[p]] -> public_message(lev->new_level[p],p,&msg);
    return;

  }

  if (lev->yielding) {
    /* sched_yield was called: the task goes to the queue tail */
    lev->yielding = 0;
    iq_insertlast(p,&lev->ready[lev->priority[p]]);
  }
  /* check if the slice is finished and insert the task in the correct
     queue position */
  else if (proc_table[p].control & CONTROL_CAP &&
           proc_table[p].avail_time <= 0) {
    /* slice exhausted: recharge it and queue the task at the tail */
    proc_table[p].avail_time += proc_table[p].wcet;
    iq_insertlast(p,&lev->ready[lev->priority[p]]);
  }
  else
    /* preempted with residual slice: keep the head position */
    iq_insertfirst(p,&lev->ready[lev->priority[p]]);

  proc_table[p].status = POSIX_READY;
}
 
static void POSIX_public_activate(LEVEL l, PID p, struct timespec *t)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);

  /* An activation on a non-sleeping task is either saved (nact >= 0,
     SAVE_ARRIVALS mode) or silently dropped (nact == -1). */
  if (proc_table[p].status != SLEEP) {
    if (lev->nact[p] != -1)
      lev->nact[p]++;
    return;
  }

  /* wake the task and queue it at the tail of its priority queue */
  proc_table[p].status = POSIX_READY;
  iq_insertlast(p, &lev->ready[lev->priority[p]]);
}
 
static void POSIX_public_unblock(LEVEL l, PID p)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);
  int prio = lev->priority[p];

  /* Like POSIX_public_activate, but unconditional: the current task
     state is not examined.  The task becomes ready and goes to the
     tail of its priority queue. */
  proc_table[p].status = POSIX_READY;
  iq_insertlast(p, &lev->ready[prio]);
}
 
static void POSIX_public_block(LEVEL l, PID p)
{
  /* Extract the running task from the level:
     . we have already extracted it from the ready queue at dispatch time;
     . the capacity event has to be removed by the generic kernel;
     . the wcet doesn't need modification;
     . the state of the task is set by the calling function.

     So, we do nothing!!!
  */
}
 
#ifdef OLDVERSION
/* OLDVERSION end-cycle handler: if there are saved activations, start
   the next job immediately (head of the queue); otherwise sleep. */
static int POSIX_public_message(LEVEL l, PID p, void *m)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);

  if (lev->nact[p] > 0) {
    /* continue!!!! consume one saved activation and stay ready */
    lev->nact[p]--;
    iq_insertfirst(p,&lev->ready[lev->priority[p]]);
    proc_table[p].status = POSIX_READY;
  }
  else
    proc_table[p].status = SLEEP;

  jet_update_endcycle(); /* Update the Jet data... */
  TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);

  return 0;
}
#else
/* Message entry point.  The void* argument doubles as a selector:
     NULL        -> task end-cycle
     (void *)1   -> task disable (currently a no-op)
     otherwise   -> pointer to a STD_command_message
   Always returns 0. */
static int POSIX_public_message(LEVEL l, PID p, void *m)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);
  STD_command_message *msg;
  NRT_TASK_MODEL *nrt;

  /* Task Endcycle */
  switch ((long)(m)) {
    case (long)(NULL):
      if (lev->nact[p] > 0) {
        /* continue!!!! consume one saved activation and stay ready */
        lev->nact[p]--;
        iq_insertfirst(p,&lev->ready[lev->priority[p]]);
        proc_table[p].status = POSIX_READY;
      } else
        proc_table[p].status = SLEEP;

      jet_update_endcycle(); /* Update the Jet data... */
      TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);
      break;

    /* Task Disable */
    case (long)(1):

      break;

    default:

      msg = (STD_command_message *)m;

      switch(msg->command) {
        /* remember the destination level; the actual migration happens
           in POSIX_public_epilogue when POSIX_CHANGE_LEVEL is seen */
        case STD_SET_NEW_LEVEL:
          lev->flag[p] |= POSIX_CHANGE_LEVEL;
          lev->new_level[p] = (int)(msg->param);

          break;
        /* stage a new NRT model; it takes effect at STD_ACTIVATE_TASK */
        case STD_SET_NEW_MODEL:
          nrt = (NRT_TASK_MODEL *)(msg->param);

          lev->priority[p] = nrt->weight;
          if (nrt->slice) {
            lev->new_slice[p] = nrt->slice;
          } else {
            lev->new_slice[p] = 0;
          }
          /* NOTE(review): CONTROL_CAP is only ever OR-ed in here; a
             later NRT_FIFO_POLICY model cannot clear it — confirm this
             is intended */
          if (nrt->policy == NRT_RR_POLICY)
            lev->new_control[p] |= CONTROL_CAP;
          if (nrt->arrivals == SAVE_ARRIVALS)
            lev->nact[p] = 0;
          else
            lev->nact[p] = -1;
          lev->flag[p] = 0;

          break;

        /* apply the staged slice/control values, then activate */
        case STD_ACTIVATE_TASK:

          if (lev->new_slice[p]) {
            proc_table[p].avail_time = lev->new_slice[p];
            proc_table[p].wcet = lev->new_slice[p];
          } else {
            /* no staged slice: fall back to the level default */
            proc_table[p].avail_time = lev->slice;
            proc_table[p].wcet = lev->slice;
          }

          proc_table[p].control = lev->new_control[p];

          POSIX_public_activate(l,p, NULL);

          break;
      }

      break;

  }
  return 0;
}
 
#endif
 
static void POSIX_public_end(LEVEL l, PID p)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);

  /* discard any saved activations for the dying task */
  lev->nact[p] = -1;

  /* return the descriptor to the global free queue */
  proc_table[p].status = FREE;
  iq_priority_insert(p, &freedesc);
}
 
/* Registration functions */
 
/*+ This init function install the "main" task +*/
static void POSIX_call_main(void *l)
{
  LEVEL lev;
  PID p;
  NRT_TASK_MODEL m;
  void *mb;

  lev = (LEVEL)l;

  /* build an NRT model for the "Main" task: RR policy, weight 0,
     explicitly bound to this level */
  nrt_task_default_model(m);
  nrt_task_def_level(m,lev); /* with this we are sure that the task arrives
                                to the correct level */

  mb = ((POSIX_level_des *)level_table[lev])->multiboot;
  nrt_task_def_arg(m,mb);
  nrt_task_def_usemath(m);
  nrt_task_def_nokill(m);
  nrt_task_def_ctrl_jet(m);
  nrt_task_def_weight(m,0);
  nrt_task_def_policy(m,NRT_RR_POLICY);
  nrt_task_def_inherit(m,NRT_EXPLICIT_SCHED);

  /* "Main" runs __init__() with the multiboot info as its argument */
  p = task_create("Main", __init__, (TASK_MODEL *)&m, NULL);

  if (p == NIL) {
    printk("\nPanic!!! can't create main task...\n");
    /* BUGFIX: the original fell through and called
       POSIX_public_activate(lev, NIL, NULL), indexing proc_table with
       NIL; bail out instead of activating a non-existent task */
    return;
  }

  POSIX_public_activate(lev,p,NULL);
}
 
 
/*+ Registration function:
TIME slice the slice for the Round Robin queue
int createmain 1 if the level creates the main task 0 otherwise
struct multiboot_info *mb used if createmain specified +*/
LEVEL POSIX_register_level(TIME slice,
                           int createmain,
                           struct multiboot_info *mb,
                           int prioritylevels)
{
  LEVEL l;              /* the level that we register */
  POSIX_level_des *lev; /* for readableness only */
  PID i;                /* a counter */
  int x;                /* a counter */

  printk("POSIX_register_level\n");

  l = level_alloc_descriptor(sizeof(POSIX_level_des));

  lev = (POSIX_level_des *)level_table[l];

  /* fill the standard descriptor */
  lev->l.public_scheduler = POSIX_public_scheduler;
  lev->l.public_create    = POSIX_public_create;
  lev->l.public_end       = POSIX_public_end;
  lev->l.public_dispatch  = POSIX_public_dispatch;
  lev->l.public_epilogue  = POSIX_public_epilogue;
  lev->l.public_activate  = POSIX_public_activate;
  lev->l.public_unblock   = POSIX_public_unblock;
  lev->l.public_block     = POSIX_public_block;
  lev->l.public_message   = POSIX_public_message;
  lev->l.public_guarantee = NULL;

  /* fill the POSIX descriptor part */
  for (i = 0; i < MAX_PROC; i++) {
    lev->nact[i] = -1;
    lev->flag[i] = 0 ;
    lev->new_level[i] = -1;
    lev->new_slice[i] = -1;
    lev->new_control[i] = 0;
  }

  /* BUGFIX: 'yielding' was never initialized; it is read by
     POSIX_public_epilogue and set only by POSIX_sched_yield, and the
     freshly allocated descriptor memory is not guaranteed to be zero */
  lev->yielding = 0;

  lev->maxpriority = prioritylevels -1;

  /* one ready queue per priority level */
  lev->ready = (IQUEUE *)kern_alloc(sizeof(IQUEUE) * prioritylevels);

  for (x = 0; x < prioritylevels; x++)
    iq_init(&lev->ready[x], &freedesc, 0);

  /* clamp the default timeslice into the allowed range */
  if (slice < POSIX_MINIMUM_SLICE) slice = POSIX_MINIMUM_SLICE;
  if (slice > POSIX_MAXIMUM_SLICE) slice = POSIX_MAXIMUM_SLICE;
  lev->slice = slice;

  lev->multiboot = mb;

  if (createmain)
    sys_atrunlevel(POSIX_call_main,(void *) l, RUNLEVEL_INIT);

  return l;
}
 
/*+ this function forces the running task to go to his queue tail;
(it works only on the POSIX level) +*/
/* Forces the running task to the tail of its ready queue, then
   reschedules.  Returns 0 on success, -1 if the caller is not
   scheduled by this level. */
int POSIX_sched_yield(LEVEL l)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);

  /* only a task scheduled by this POSIX level may yield here */
  if (proc_table[exec_shadow].task_level != l)
    return -1;

  /* set the yielding flag (consumed by POSIX_public_epilogue, which
     will queue the task at the tail instead of the head), then run
     the usual save/schedule/load rescheduling protocol */
  proc_table[exec_shadow].context = kern_context_save();
  lev->yielding = 1;
  scheduler();
  kern_context_load(proc_table[exec_shadow].context);
  return 0;
}
 
/*+ this function returns the maximum level allowed for the POSIX level +*/
int POSIX_get_priority_max(LEVEL l)
{
  /* highest priority value accepted by this level */
  return ((POSIX_level_des *)(level_table[l]))->maxpriority;
}
 
/*+ this function returns the default timeslice for the POSIX level +*/
int POSIX_rr_get_interval(LEVEL l)
{
  /* default Round Robin timeslice of this level */
  return ((POSIX_level_des *)(level_table[l]))->slice;
}
 
/*+ this functions returns some paramaters of a task;
policy must be NRT_RR_POLICY or NRT_FIFO_POLICY;
priority must be in the range [0..prioritylevels]
returns ENOSYS or ESRCH if there are problems +*/
int POSIX_getschedparam(LEVEL l, PID p, int *policy, int *priority)
{
  POSIX_level_des *lev;

  /* validate the PID and make sure the task belongs to this level */
  if (p < 0 || p >= MAX_PROC || proc_table[p].status == FREE)
    return ESRCH;
  if (proc_table[p].task_level != l)
    return ENOSYS;

  /* CONTROL_CAP distinguishes RR tasks from FIFO tasks */
  *policy = (proc_table[p].control & CONTROL_CAP) ? NRT_RR_POLICY
                                                  : NRT_FIFO_POLICY;

  lev = (POSIX_level_des *)(level_table[l]);
  *priority = lev->priority[p];

  return 0;
}
 
/*+ this functions sets paramaters of a task +*/
int POSIX_setschedparam(LEVEL l, PID p, int policy, int priority)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);
  int oldprio;

  /* validate the PID and make sure the task belongs to this level */
  if (p < 0 || p >= MAX_PROC || proc_table[p].status == FREE)
    return ESRCH;
  if (proc_table[p].task_level != l)
    return ENOSYS;

  /* translate the POSIX policy into the CONTROL_CAP bit */
  switch (policy) {
    case SCHED_RR:
      proc_table[p].control |= CONTROL_CAP;
      break;
    case SCHED_FIFO:
      proc_table[p].control &= ~CONTROL_CAP;
      break;
    default:
      return EINVAL;
  }

  oldprio = lev->priority[p];
  if (oldprio != priority) {
    if (proc_table[p].status == POSIX_READY) {
      /* ready task: move it to the tail of its new priority queue */
      iq_extract(p, &lev->ready[oldprio]);
      lev->priority[p] = priority;
      iq_insertlast(p, &lev->ready[priority]);
    }
    else
      lev->priority[p] = priority;
  }

  return 0;
}
 
 
 
/shark/trunk/modules/posix/subdir.mk
0,0 → 1,0
OBJS += posix/posix.o
/shark/trunk/modules/pc/pc/pc.h
0,0 → 1,115
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: pc.h,v 1.1 2005-02-25 10:40:58 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:40:58 $
------------
 
This file contains the Priority Ceiling (PC) Protocol
 
Title:
PC (Priority Ceiling protocol)
 
Resource Models Accepted:
PC_RES_MODEL
This model is used to tell a PC level the priority of a task.
 
Description:
This module implement the Priority Ceiling Protocol.
The priority inheritance is made using the shadow field of the
task descriptor. No difference is made upon the task model of the
tasks that use PC mutexes.
 
This module is directly derived from the PI one.
 
A PC mutex is created passing the PC_mutexattr structure to mutex_init.
 
When a task is created, a priority must be assigned to the task. This
priority is specified using a PC_RES_MODEL resource model.
 
Exceptions raised:
XMUTEX_OWNER_KILLED
This exception is raised when a task ends and it owns one or more
mutexes
 
Restrictions & special features:
- This module is NOT Posix compliant
- This module can manage any number of PC mutexes.
- If a task ends (because it reaches the end of the body or because it
is killed by someone) and it owns some mutex, an exception is raised.
- if a mutex unlock is called on a mutex not previously
locked or previously locked by another task an exception is raised
- A PC mutex can't be statically allocated.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
 
#ifndef __PC_H__
#define __PC_H__
 
#include <kernel/types.h>
#include <kernel/descr.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
RLEVEL PC_register_module(void);
 
/*+ This function gets the ceiling of a PC mutex, and it has to be called
only by a task that owns the mutex.
Returns -1 if the mutex is not a PC mutex, 0 otherwise +*/
int PC_get_mutex_ceiling(const mutex_t *mutex, DWORD *ceiling);
 
/*+ This function sets the ceiling of a PC mutex, and it has to be called
only by a task that owns the mutex.
Returns -1 if the mutex is not a PC mutex, 0 otherwise +*/
int PC_set_mutex_ceiling(mutex_t *mutex, DWORD ceiling, DWORD *old_ceiling);
 
/*+ This function sets the ceiling of a task +*/
void PC_set_task_ceiling(RLEVEL r, PID p, DWORD priority);
 
__END_DECLS
#endif
/shark/trunk/modules/pc/pc.c
0,0 → 1,499
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: pc.c,v 1.1 2005-02-25 10:40:58 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:40:58 $
------------
 
Priority Ceiling protocol. see pc.h for more details...
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <pc/pc/pc.h>
 
#include <ll/ll.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
typedef struct PC_mutexstruct_t PC_mutex_t;
 
/* The PC resource level descriptor */
typedef struct {
mutex_resource_des m; /*+ the mutex interface +*/
 
int nlocked[MAX_PROC]; /*+ how many mutex a task currently locks +*/
 
PC_mutex_t *mlist; /*+ the list of the busy mutexes +*/
DWORD priority[MAX_PROC]; /*+ the PC priority of the tasks in the system +*/
 
PID blocked[MAX_PROC];
 
} PC_mutex_resource_des;
 
/* this is the structure normally pointed by the opt field in the
mutex_t structure */
struct PC_mutexstruct_t {
PID owner;
int nblocked;
PID firstblocked;
 
DWORD ceiling;
PC_mutex_t *next;
PC_mutex_t *prev;
};
 
/* This is the test done when a task try to lock a mutex.
It checks if the system ceiling is less than the process priority
It returns 1 if the task can lock the mutex, 0 otherwise */
static int PC_accept(PC_mutex_resource_des *lev, DWORD prio)
{
  PC_mutex_t *cur;

  /* The busy-mutex list is ordered by ceiling, so the first mutex not
     owned by the running task defines the current system ceiling:
     the lock is admitted only if prio beats (is less than) it. */
  for (cur = lev->mlist; cur != NULL; cur = cur->next)
    if (cur->owner != exec_shadow)
      return prio < cur->ceiling;

  /* every busy mutex (if any) is owned by the caller: always admitted */
  return 1;
}
 
/* this function inserts a mutex in the mutex list.
the code is similar to q_insert of queue.c */
static void PC_insert(PC_mutex_resource_des *lev, PC_mutex_t *m)
{
  /* ordered insertion into the doubly-linked busy-mutex list, keyed on
     the mutex ceiling (ascending; ties go after existing entries) */
  DWORD key = m->ceiling;
  PC_mutex_t *before = NULL;
  PC_mutex_t *after  = lev->mlist;

  /* walk past every node whose ceiling does not exceed the new key */
  while (after != NULL && after->ceiling <= key) {
    before = after;
    after  = after->next;
  }

  /* splice m between 'before' and 'after' */
  if (before != NULL)
    before->next = m;
  else
    lev->mlist = m;

  if (after != NULL)
    after->prev = m;

  m->next = after;
  m->prev = before;
}
 
/* this function extracts a mutex in the mutex list.
the code is similar to q_extract of queue.c */
/* Unlink mutex m from the level's doubly-linked busy-mutex list. */
static void PC_extract(PC_mutex_resource_des *lev, PC_mutex_t * m)
{
  PC_mutex_t *before = m->prev;
  PC_mutex_t *after  = m->next;

  if (before == NULL)
    lev->mlist = after;     /* m was the head of the list */
  else
    before->next = after;

  if (after != NULL)
    after->prev = before;
}
 
 
#if 0
/*+ print resource protocol statistics...+*/
/* Debug-only dump of the PC module state: per-task lock counts and PC
   priorities.  Currently compiled out (the whole function is inside
   an #if 0 block). */
static void PC_resource_status(RLEVEL r)
{
  PC_mutex_resource_des *m = (PC_mutex_resource_des *)(resource_table[r]);
  PID i;

  kern_printf("Resources owned by the tasks:\n");
  for (i=0; i<MAX_PROC; i++) {
    kern_printf("%-4d", m->nlocked[i]);
  }

  kern_printf("\nPC priority of the tasks:\n");
  for (i=0; i<MAX_PROC; i++) {
    kern_printf("%-4ld", m->priority[i]);
  }
  // in the future: print the status of the blocked semaphores!

}
#endif
 
/* Register task p at this resource level: record its PC priority taken
   from the PC_RES_MODEL.  Returns 0 on success, -1 if the model is not
   a PC model or addresses a different level. */
static int PC_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  PC_mutex_resource_des *des = (PC_mutex_resource_des *)(resource_table[l]);
  PC_RES_MODEL *model;

  /* accept only PC resource models addressed to this level */
  if (r->rclass != PC_RCLASS)
    return -1;
  if (r->level && r->level != l)
    return -1;

  model = (PC_RES_MODEL *)r;

  des->priority[p] = model->priority;
  des->nlocked[p]  = 0;

  return 0;
}
 
/* Detach task p from the level.  A task dying while still holding a
   mutex is an error (XMUTEX_OWNER_KILLED); otherwise its PC priority is
   reset to MAX_DWORD ("no priority"). */
static void PC_res_detach(RLEVEL l, PID p)
{
  PC_mutex_resource_des *des = (PC_mutex_resource_des *)(resource_table[l]);

  if (des->nlocked[p] != 0) {
    kern_raise(XMUTEX_OWNER_KILLED, p);
  } else {
    des->nlocked[p] = 0;
  }

  des->priority[p] = MAX_DWORD;
}
 
/* Initialize a PC mutex (the mutex_init entry point for this level).
   Returns -1 if the attribute is not a PC attribute, ENOMEM if the
   control block cannot be allocated, 0 on success. */
static int PC_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  PC_mutex_t *p;

  if (a->mclass != PC_MCLASS)
    return -1;

  p = (PC_mutex_t *) kern_alloc(sizeof(PC_mutex_t));

  /* control if there is enough memory; no control on init on a
     non- destroyed mutex */

  if (!p)
    return (ENOMEM);

  p->owner = NIL;
  p->nblocked = 0;
  p->firstblocked = NIL;

  p->ceiling = ((PC_mutexattr_t *)a)->ceiling;
  /* BUG FIX: 'prev' was left uninitialized (only 'next' was set),
     silently relying on PC_insert() always running before any
     PC_extract(); initialize both links explicitly. */
  p->next = NULL;
  p->prev = NULL;

  m->mutexlevel = l;
  m->opt = (void *)p;

  return 0;
}
 
 
/* Destroy a PC mutex: frees its control block.
   Returns EINVAL if the mutex was never initialized (or already
   destroyed), EBUSY if tasks are still blocked on it, 0 on success. */
static int PC_destroy(RLEVEL l, mutex_t *m)
{
  PC_mutex_t *p = (PC_mutex_t *)m->opt;
  SYS_FLAGS f;

  /* BUG FIX: the old code read ((PC_mutex_t *)m->opt)->nblocked BEFORE
     checking m->opt for NULL, dereferencing a null pointer on a
     destroyed/uninitialized mutex.  Check the pointer first and report
     EINVAL, consistently with PC_lock/PC_trylock/PC_unlock. */
  if (!p)
    return (EINVAL);

  if (p->nblocked)
    return (EBUSY);

  f = kern_fsave();
  kern_free(m->opt, sizeof(PC_mutex_t));
  m->opt = NULL;
  kern_frestore(f);

  return 0;
}
 
/* see pi.c for informations on the blocking algorithm used */
/* Lock a PC mutex.
   Returns 0 on success;
   EINVAL  if the mutex is not initialized, or the mutex ceiling exceeds
           the caller's PC priority (POSIX Priority Protect rule);
   EDEADLK if the caller already owns the mutex.
   May "block": blocking is implemented with the shadow mechanism — the
   caller's shadow is pointed at the owner of the head busy mutex and
   the scheduler is invoked. */
static int PC_lock(RLEVEL l, mutex_t *m)
{
  PC_mutex_resource_des *lev = (PC_mutex_resource_des *)(resource_table[l]);
  PC_mutex_t *p;
  SYS_FLAGS f;

  f = kern_fsave();

  p = (PC_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, return an error! */
    kern_frestore(f);
    return (EINVAL);
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_frestore(f);
    return (EDEADLK);
  }

  if (p->ceiling > lev->priority[exec_shadow]) {
    /* see POSIX standard p. 258: locking is invalid when the mutex
       ceiling lies beyond the caller's PC priority */
    kern_frestore(f);
    return (EINVAL);
  }

  while (!PC_accept(lev, lev->priority[exec_shadow])) {
    /* the mutex is locked by someone,
       or another mutex with greater ceiling is busy,
       "block" the task on the busy mutex with the highest ceiling
       (pointed by lev->mlist)...*/

    //kern_printf("Blocking on %d, owner=%d, exec_shadow=%d\n",lev->mlist,lev->mlist->owner,exec_shadow);
    /* chain the caller into the mutex's blocked list and shadow it to
       the mutex owner so the owner runs in its place */
    proc_table[exec_shadow].shadow = lev->mlist->owner;
    lev->blocked[exec_shadow] = lev->mlist->firstblocked;
    lev->mlist->firstblocked = exec_shadow;
    lev->mlist->nblocked++;

    /* ... call the scheduler... */
    scheduler();
    //kern_printf("schedule: exec=%d, exec_shadow=%d\n",exec,exec_shadow);
    TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
    kern_context_load(proc_table[exec_shadow].context);

    /* ... and reaquire the cli() before the test... */
    kern_cli();
  }

  /* the mutex is free, We can lock it! */
  /* reload the level descriptor: a context switch happened above */
  lev = (PC_mutex_resource_des *)(resource_table[l]);
  lev->nlocked[exec_shadow]++;

  p->owner = exec_shadow;

  PC_insert(lev, p);

  kern_frestore(f);

  return 0;
}
 
/* Non-blocking lock of a PC mutex.
   Returns 0 on success;
   EINVAL  if the mutex is not initialized, or the mutex ceiling exceeds
           the caller's PC priority (POSIX Priority Protect rule);
   EDEADLK if the caller already owns the mutex;
   EBUSY   if the mutex cannot be acquired right now. */
static int PC_trylock(RLEVEL l, mutex_t *m)
{
  PC_mutex_resource_des *lev = (PC_mutex_resource_des *)(resource_table[l]);
  PC_mutex_t *p;
  SYS_FLAGS f;

  f = kern_fsave();

  p = (PC_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, return an error! */
    kern_frestore(f);
    return (EINVAL);
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_frestore(f);
    return (EDEADLK);
  }

  if (p->ceiling > lev->priority[exec_shadow]) {
    /* see POSIX standard p. 258.
       BUG FIX: the comparison was '<', the opposite of the one used by
       PC_lock for the very same check; POSIX PRIO_PROTECT requires
       EINVAL when the caller's priority is beyond the mutex ceiling,
       which is what PC_lock (correctly) tests. */
    kern_frestore(f);
    return (EINVAL);
  }

  if (!PC_accept(lev, lev->priority[exec_shadow])) {
    /* a task already owns the mutex, or another busy mutex has a
       conflicting ceiling: do not block, just fail.
       ('if' instead of the old 'while': the body always returned.) */
    kern_frestore(f);
    return (EBUSY);
  }

  /* the mutex is free, we can lock it!  (no context switch happened,
     so 'lev' is still valid) */
  lev->nlocked[exec_shadow]++;

  p->owner = exec_shadow;

  PC_insert(lev, p);

  kern_frestore(f);

  return 0;
}
 
/* Unlock a PC mutex owned by the running task.
   Returns EINVAL if the mutex is not initialized, EPERM if the caller
   is not the owner; otherwise wakes up every task blocked on the mutex
   (restoring their shadows) and reschedules. */
static int PC_unlock(RLEVEL l, mutex_t *m)
{
  PC_mutex_resource_des *lev;
  PC_mutex_t *p;
  int i, j;

  p = (PC_mutex_t *)m->opt;
  if (!p)
    return (EINVAL);

  if (p->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    /* NOTE(review): this path calls kern_sti() although no matching
       kern_cli() is visible in this function — verify the interrupt
       state expected by callers of the unlock entry point. */
    kern_sti();
    return (EPERM);
  }

  proc_table[exec_shadow].context = kern_context_save();

  /* the mutex is mine */
  lev = (PC_mutex_resource_des *)(resource_table[l]);
  lev->nlocked[exec_shadow]--;

  p->owner = NIL;

  /* we unblock all the waiting tasks... */
  i = p->firstblocked;
  p->firstblocked = NIL;

  while (i != NIL) {
    /* each blocked task becomes its own shadow again, and its link in
       the blocked chain is cleared */
    proc_table[i].shadow = j = i;
    i = lev->blocked[i];
    lev->blocked[j] = NIL;
  }
  p->nblocked = 0;

  PC_extract(lev, p);

  /* {
     int xxx;
     kern_printf("(PC_unlock owner=%d ",p->owner);
     for (xxx = 0; xxx<5; xxx++) kern_printf("p%d s%d|",xxx, proc_table[xxx].shadow);
     kern_printf(")\n");
     }*/

  scheduler();
  TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}
 
/* Register the Priority Ceiling module: allocates a resource-level
   descriptor, installs the mutex interface entry points and resets the
   per-task state.  Returns the new resource level. */
RLEVEL PC_register_module(void)
{
  RLEVEL level;                 /* the level that we register */
  PC_mutex_resource_des *des;   /* for readability only */
  PID pid;                      /* a counter */

  printk("PC_register_module\n");

  /* request an entry in the level table and allocate the descriptor */
  level = resource_alloc_descriptor();
  des = (PC_mutex_resource_des *)kern_alloc(sizeof(PC_mutex_resource_des));
  resource_table[level] = (resource_des *)des;

  /* generic resource interface */
  des->m.r.rtype        = MUTEX_RTYPE;
  des->m.r.res_register = PC_res_register;
  des->m.r.res_detach   = PC_res_detach;

  /* mutex interface */
  des->m.init    = PC_init;
  des->m.destroy = PC_destroy;
  des->m.lock    = PC_lock;
  des->m.trylock = PC_trylock;
  des->m.unlock  = PC_unlock;

  /* PC-specific per-task state: no locks held, no PC priority,
     nobody blocked */
  for (pid = 0; pid < MAX_PROC; pid++) {
    des->nlocked[pid]  = 0;
    des->priority[pid] = MAX_DWORD;
    des->blocked[pid]  = NIL;
  }

  des->mlist = NULL;

  return level;
}
 
/*+ This function gets the ceiling of a PC mutex; it has to be called
    only by a task that owns the mutex.
    Returns -1 if mutex/ceiling is NULL or the mutex is not initialized,
    0 otherwise +*/
int PC_get_mutex_ceiling(const mutex_t *mutex, DWORD *ceiling)
{
  /* validate the out-parameter BEFORE touching the mutex (the old code
     also kept an unused resource_table lookup, removed here) */
  if (!mutex || !ceiling)
    return -1;

  /* a destroyed / never-initialized mutex has opt == NULL: fail instead
     of dereferencing a null pointer */
  if (!mutex->opt)
    return -1;

  *ceiling = ((PC_mutex_t *)mutex->opt)->ceiling;

  return 0;
}
 
/*+ This function sets the ceiling of a PC mutex; it has to be called
    only by a task that owns the mutex.
    Returns -1 if the mutex is NULL or not initialized, 0 otherwise.
    If old_ceiling is non-NULL the previous ceiling is stored there. +*/
int PC_set_mutex_ceiling(mutex_t *mutex, DWORD ceiling, DWORD *old_ceiling)
{
  PC_mutex_t *p;

  /* guard against a destroyed / never-initialized mutex (opt == NULL);
     the old code dereferenced opt unconditionally and kept an unused
     resource_table lookup, removed here */
  if (!mutex || !mutex->opt)
    return -1;

  p = (PC_mutex_t *)mutex->opt;

  if (old_ceiling)
    *old_ceiling = p->ceiling;

  p->ceiling = ceiling;
  return 0;
}
 
/* Set the PC priority of task p at resource level r. */
void PC_set_task_ceiling(RLEVEL r, PID p, DWORD priority)
{
  PC_mutex_resource_des *des;

  des = (PC_mutex_resource_des *)(resource_table[r]);
  des->priority[p] = priority;
}
 
/shark/trunk/modules/pc/subdir.mk
0,0 → 1,0
OBJS += pc/pc.o
/shark/trunk/modules/bd_edf/bd_edf/bd_edf.h
0,0 → 1,63
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/*
* Copyright (C) 2000 Massimiliano Giorgi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
/*
* CVS : $Id: bd_edf.h,v 1.1 2005-02-25 10:55:09 pj Exp $
*
* File: $File$
* Revision: $Revision: 1.1 $
* Last update: $Date: 2005-02-25 10:55:09 $
*/
 
#ifndef __BD_EDF_H__
#define __BD_EDF_H__
 
#include <kernel/types.h>
#include <kernel/descr.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
RLEVEL BD_EDF_register_module(void);
 
TIME bd_edf_getdl(void);
 
__END_DECLS
 
#endif
/shark/trunk/modules/bd_edf/bd_edf.c
0,0 → 1,126
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/*
* Copyright (C) 2000 Massimiliano Giorgi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
/*
* CVS : $Id: bd_edf.c,v 1.1 2005-02-25 10:55:09 pj Exp $
*
* File: $File$
* Revision: $Revision: 1.1 $
* Last update: $Date: 2005-02-25 10:55:09 $
*/
 
#include <bd_edf/bd_edf/bd_edf.h>
 
#include <ll/ll.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <kernel/assert.h>
 
static int mylevel=-1;
 
typedef struct TAGbd_edf_resource_des
{
resource_des rd;
TIME dl[MAX_PROC];
} bd_edf_resource_des;
 
/* Register task p at the BD_EDF level: record the relative disk-request
   deadline taken from the BDEDF_RES_MODEL.  Returns 0 on success, -1 if
   the model is not a BD_EDF model or addresses another level. */
static int res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  bd_edf_resource_des *des = (bd_edf_resource_des *)(resource_table[l]);
  BDEDF_RES_MODEL *model;

  /* accept only BD_EDF resource models addressed to this level */
  if (r->rclass != BDEDF_RCLASS)
    return -1;
  if (r->level && r->level != l)
    return -1;

  model = (BDEDF_RES_MODEL *)r;

  assertk(mylevel == l);
  des->dl[p] = model->dl;

  return 0;
}
 
/* Detach task p from the BD_EDF level: clear its relative deadline. */
static void res_detach(RLEVEL l, PID p)
{
  bd_edf_resource_des *des = (bd_edf_resource_des *)(resource_table[l]);

  assertk(mylevel == l);
  des->dl[p] = 0;
}
 
RLEVEL BD_EDF_register_module(void)
{
RLEVEL l;
bd_edf_resource_des *m;
int i;
/* request an entry in the level_table */
l=resource_alloc_descriptor();
 
/* alloc the space needed for the EDF_level_des */
m=(bd_edf_resource_des*)kern_alloc(sizeof(bd_edf_resource_des));
 
/* update the level_table with the new entry */
resource_table[l]=(resource_des*)m;
 
/* fill the resource_des descriptor */
m->rd.rtype=DEFAULT_RTYPE;
m->rd.res_register=res_register;
m->rd.res_detach=res_detach;
 
for (i=0;i<MAX_PROC;i++) m->dl[i]=MAX_TIME;
assertk(mylevel==-1);
mylevel=l;
 
return l;
}
 
/* Return the absolute disk-request deadline of the running task
   (relative deadline + current time), or MAX_TIME when the module is
   not registered or the task has no deadline. */
TIME bd_edf_getdl(void)
{
  bd_edf_resource_des *des;
  TIME rel;

  if (mylevel == -1)
    return MAX_TIME;

  des = (bd_edf_resource_des *)(resource_table[mylevel]);
  rel = des->dl[exec_shadow];
  if (rel == MAX_TIME)
    return MAX_TIME;

  return rel + sys_gettime(NULL);
}
/shark/trunk/modules/bd_edf/subdir.mk
0,0 → 1,0
OBJS += bd_edf/bd_edf.o
/shark/trunk/modules/hartport/hartport/hartport.h
0,0 → 1,126
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: hartport.h,v 1.1 2005-02-25 10:53:41 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:53:41 $
------------
 
This file contains the Hartik 3.3.0's port primitives
 
Title:
HARTPORT (Hartik Ports)
 
Resource Models Accepted:
None
 
Description:
This module contains a port library compatible with the Hartik one.
 
Exceptions raised:
None
 
Restrictions & special features:
- This module is NOT Posix compliant
 
Author: Giuseppe Lipari
Date: 9/5/96
Revision: 2.0
Date: 14/3/97
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
#ifndef __PORT_H__
#define __PORT_H__
 
#include <sem/sem/sem.h>
#include <ll/ll.h>
 
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/* $HEADER- */
//#ifndef __HW_DEP_H__
//#include "hw_dep.h"
//#endif
/* $HEADER+ */
 
typedef short PORT;
 
#define MAX_PORT 15U /*+ Maximum number of ports +*/
#define MAX_PORT_INT 30U /*+ Max num. of port connections +*/
#define MAX_PORT_NAME 20U /*+ Maximum port name length +*/
#define MAX_HASH_ENTRY MAX_PORT /*+ More port stuff +*/
 
 
#define STREAM 1
#define MAILBOX 2
#define STICK 3
 
#define READ 0
#define WRITE 1
 
 
 
/*+ This function must be inserted in the __hartik_register_levels__ +*/
void HARTPORT_init(void);
 
 
/* Debugging functions */
void print_port(void);
void port_write(PORT p);
 
/* User level port primitives */
PORT port_create(char *name, int dim_mes, int num_mes, BYTE type, BYTE access);
PORT port_connect(char *name, int dim_mes, BYTE type, BYTE access);
void port_delete(PORT p);
void port_disconnect(PORT p);
 
WORD port_send(PORT p,void *m,BYTE wait);
WORD port_receive(PORT p,void *m,BYTE wait);
 
__END_DECLS
#endif /* __PORT_H__ */
 
/shark/trunk/modules/hartport/hartport.c
0,0 → 1,682
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: hartport.c,v 1.1 2005-02-25 10:53:41 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:53:41 $
------------
 
This file contains the Hartik 3.3.1 Port functions
 
Author: Giuseppe Lipari
Date: 2/7/96
 
File: Port.C (renamed to hartport.c)
Revision: 1.4
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <hartport/hartport/hartport.h>
#include <kernel/config.h>
#include <ll/ll.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <kernel/model.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <errno.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
 
static sem_t hash_mutex;
 
#define __PORT_DBG__
 
/* Name-lookup entry: associates a port name with its descriptor and
   with the tasks blocked in port_connect() waiting for its creation. */
struct hash_port {
  char name[MAX_PORT_NAME];  /* port name (lookup key) */
  PID port_index;            /* index into port_des[] */
  BYTE blocked;              /* # of connects waiting on 'sem' */
  sem_t sem;                 /* created on demand to park early connects */
  BYTE valid;                /* TRUE if the entry is in use */
  int port_int;              /* head of the port_int[] interface list */
};

/* Kernel-side port descriptor: circular message buffer plus the
   semaphores that synchronize senders and receivers. */
struct port_ker {
  BYTE valid;        /* TRUE if the descriptor is in use */
  int next;          /* free-list link */
  WORD dim_block;    /* total buffer size in bytes */
  BYTE *mem_block;   /* buffer base address */
  BYTE *head;        /* write position (advanced by port_send) */
  BYTE *tail;        /* read position (advanced by port_receive) */
  BYTE type;         /* STREAM, MAILBOX or STICK */
  sem_t mutex;       /* mutual exclusion on the buffer */
  sem_t pieno;       /* counts free bytes (initialized to dim_block) */
  sem_t vuoto;       /* counts readable bytes (initialized to 0) */
};

/* Task-side port interface, the handle returned by
   port_create()/port_connect(). */
struct port_com {
  BYTE valid;      /* TRUE if the interface is in use */
  int next;        /* free-list / per-hash-entry list link */
  PID proc_id;     /* owner task (assignment currently commented out) */
  BYTE access;     /* READ or WRITE */
  int port_index;  /* index into port_des[] */
  WORD dim_mes;    /* message size in bytes */
  int h_index;     /* back-pointer into htable[] */
};
struct hash_port htable[MAX_HASH_ENTRY];
struct port_ker port_des[MAX_PORT];
struct port_com port_int[MAX_PORT_INT];
int freeportdes;   /* head of the port_des[] free list */
int freeportint;   /* head of the port_int[] free list */

static int port_installed = 0;  /* guards against double HARTPORT_init() */
 
/*----------------------------------------------------------------------*/
/* port_init() : inizializza le strutture delle porte; da chiamare */
/* dentro __hartik_register_levels__(). */
/*----------------------------------------------------------------------*/
/* Initialize the port module: hash table, hash mutex and the two free
   lists.  Idempotent — only the first call does the work.  Must be
   called inside __hartik_register_levels__(). */
void HARTPORT_init(void)
{
  int idx;

  if (port_installed)
    return;
  port_installed = 1;

  /* empty hash table */
  for (idx = 0; idx < MAX_HASH_ENTRY; idx++) {
    htable[idx].valid = FALSE;
    htable[idx].port_int = NIL;
    htable[idx].blocked = 0;
  }

  /* mutex semaphore protecting the hash table */
  sem_init(&hash_mutex, 0, 1);

  /* chain every port descriptor into the free list */
  for (idx = 0; idx < MAX_PORT; idx++) {
    port_des[idx].next = (idx == MAX_PORT - 1) ? NIL : idx + 1;
    port_des[idx].valid = FALSE;
  }
  freeportdes = 0;

  /* chain every port interface into the free list */
  for (idx = 0; idx < MAX_PORT_INT; idx++) {
    port_int[idx].next = (idx == MAX_PORT_INT - 1) ? NIL : idx + 1;
    port_int[idx].valid = FALSE;
  }
  // for (i = PORT_NO_MORE_DESCR; i <= PORT_UNVALID_DESCR; i++)
  //   exc_set(i,port_exception);
  freeportint = 0;
}
 
 
/*----------------------------------------------------------------------*/
/* hash_fun() : address hash table */
/*----------------------------------------------------------------------*/
/* Maps a port name to a hash-table slot using only its first character.
   NOTE(review): MAX_HASH_ENTRY expands to 15U (unsigned), so a negative
   'char' value is converted to unsigned before the modulo and the
   result still lands in [0, MAX_HASH_ENTRY) — surprising but safe.
   Only the first character contributes, so similarly-named ports all
   collide and fall back to linear probing in the callers. */
static int hash_fun(char *name)
{
  return (*name % MAX_HASH_ENTRY);
}
 
 
/*----------------------------------------------------------------------*/
/* getfreedes : restituisce l'indice di un descrittore di porta libero */
/*----------------------------------------------------------------------*/
/* Pop a free port descriptor index off the free list.
   Returns the index, or -1 with errno = EPORT_NO_MORE_DESCR when the
   table is exhausted.  Interrupt-safe via kern_fsave/kern_frestore. */
static int getfreedes(void)
{
  SYS_FLAGS flags;
  int idx;

  flags = kern_fsave();
  idx = freeportdes;
  if (idx == NIL) {
    errno = EPORT_NO_MORE_DESCR;
    kern_frestore(flags);
    return -1;
  }
  freeportdes = port_des[idx].next;
  kern_frestore(flags);

  return idx;
}
 
/*----------------------------------------------------------------------*/
/* ungetdes() : mette il descrittore tra quelli disponibile */
/*----------------------------------------------------------------------*/
/* Push a port descriptor index back on top of the free list. */
static void ungetdes(int desc)
{
  SYS_FLAGS flags = kern_fsave();

  port_des[desc].next = freeportdes;
  freeportdes = desc;

  kern_frestore(flags);
}
 
/*----------------------------------------------------------------------*/
/* get freeint : restituisce una interfaccia di porta libera */
/*----------------------------------------------------------------------*/
/* Pop a free port interface index off the free list.
   Returns the index, or -1 with errno = EPORT_NO_MORE_INTERF when the
   table is exhausted.  Interrupt-safe via kern_fsave/kern_frestore. */
static int getfreeint(void)
{
  SYS_FLAGS flags;
  int idx;

  flags = kern_fsave();
  idx = freeportint;
  if (idx == NIL) {
    errno = EPORT_NO_MORE_INTERF;
    kern_frestore(flags);
    return -1;
  }
  freeportint = port_int[idx].next;
  kern_frestore(flags);

  return idx;
}
 
/*----------------------------------------------------------------------*/
/* ungetint : rende disponibile l'interfaccia di porta specificata */
/*----------------------------------------------------------------------*/
/* Push a port interface index back on top of the free list. */
static void ungetint(int iface)
{
  SYS_FLAGS flags = kern_fsave();

  port_int[iface].next = freeportint;
  freeportint = iface;

  kern_frestore(flags);
}
 
/*----------------------------------------------------------------------*/
/* port_create(): Apre la porta specificata dalla stringa, effettuando */
/* automaticamente il collegamento */
/* WARNING : La funzione e' bloccante per la mutua esclusione sulla */
/* hash table */
/*----------------------------------------------------------------------*/
/* Creates (opens) the port named 'name' and returns a port interface
   handle, or -1 with errno set on error.
   dim_mes: message size in bytes; num_mes: buffer capacity in messages
   (ignored for STICK ports); type: STREAM/MAILBOX/STICK;
   access: READ or WRITE.
   WARNING: may block on the hash-table mutex. */
PORT port_create(char *name, int dim_mes, int num_mes, BYTE type, BYTE access)
{
  int i, pd, pi;
  WORD letti = 0;      /* number of hash entries probed so far */
  BYTE flag = FALSE;
  SYS_FLAGS f;

  /*
     Get a free port descriptor.
  */
#ifdef __PORT_DBG__
  if ((type == MAILBOX) && (access == WRITE)) {
    errno = EPORT_INCOMPAT_MESSAGE;
    return -1;
  }
  if ((type == STICK ) && (access == READ )) {
    errno = EPORT_INCOMPAT_MESSAGE;
    return -1;
  }
#endif
  pd = getfreedes();
  if (pd == -1) return -1;
  /*
     We must access the hash table in mutual exclusion!  Since the table
     is accessed by the creates and the connects, a connect cannot
     interleave with a create.
  */
  sem_wait(&hash_mutex);
  /*
     Linear-probe the hash table until we find a free entry, or an entry
     previously taken by a connect that blocked on it (blocked > 0).
     If we find one already opened by another create we fail; the same
     happens if every entry is occupied (counted with 'letti').
  */
  i = hash_fun(name);
  while (!flag) {
    if (htable[i].valid == FALSE) flag = TRUE;
    else {
      if (strcmp(htable[i].name,name) == 0) {
        if (htable[i].blocked == 0) {
          /* already opened by another port_create */
          errno = EPORT_ALREADY_OPEN;
          sem_post(&hash_mutex);
          return -1;
        }
        else flag = TRUE;
      }
      else {
        /* collision: probe the next entry */
        i = (i+1) % MAX_HASH_ENTRY;
        letti++;
      }
    }
    if (letti > MAX_HASH_ENTRY-1) {
      errno = EPORT_NO_MORE_HASHENTRY;
      sem_post(&hash_mutex);
      return -1;
    }

  }
  htable[i].valid = TRUE;
  strcpy(htable[i].name, name);

  htable[i].port_index = pd;
  /*
     Now initialize the whole port descriptor structure.
  */
  if (type == STICK) port_des[pd].dim_block = dim_mes;
  else port_des[pd].dim_block = dim_mes * num_mes;

  f = kern_fsave();
  port_des[pd].mem_block = kern_alloc(port_des[pd].dim_block);
  kern_frestore(f);
  if (port_des[pd].mem_block == NULL) {
    errno = EPORT_2_CONNECT;
    sem_post(&hash_mutex);
    return -1;
  }

  port_des[pd].head = port_des[pd].tail = port_des[pd].mem_block;

  sem_init(&port_des[pd].mutex,0,1);
  /* 'pieno' counts free space, 'vuoto' counts readable bytes */
  sem_init(&port_des[pd].pieno,0,port_des[pd].dim_block);
  sem_init(&port_des[pd].vuoto,0,0);
  port_des[pd].type = type;
  /*
     Get and initialize the port interface towards the process
     ('pi' is the value returned to the caller).
  */
  pi = getfreeint();
  if (pi == -1) {
    sem_post(&hash_mutex);
    return -1;
  }
  /* port_int[pi].proc_id = exec_shadow; */
  port_int[pi].access = access;
  port_int[pi].port_index = pd;
  port_int[pi].dim_mes = dim_mes;
  port_int[pi].next = NIL;
  port_int[pi].h_index = i;
  port_des[pd].valid = TRUE;
  port_int[pi].valid = TRUE;
  /*
     Wake up any processes that called port_connect and blocked on the
     semaphore queue of this hash table entry!
  */
  if (htable[i].blocked > 0) {
    sem_xpost(&htable[i].sem, htable[i].blocked);
    htable[i].blocked = 0;
    sem_destroy(&htable[i].sem);
  }
  /*
     Finally release the mutual exclusion.
  */
  sem_post(&hash_mutex);
  return (pi);
}
 
/*----------------------------------------------------------------------*/
/* port_connect(): collega la porta specificata dalla stringa. */
/* WARNING : La funzione e' bloccante per la mutua esclusione sulle */
/* strutture delle porte */
/*----------------------------------------------------------------------*/
/* Connects to the port named 'name', blocking until it is created if
   necessary.  Returns a port interface handle, or -1 with errno set.
   WARNING: may block on the hash-table mutex and on the per-entry
   creation semaphore. */
PORT port_connect(char *name, int dim_mes, BYTE type, BYTE access)
{
  int i, pi, pd, pn,letti = 0;
  BYTE flag = FALSE, create = FALSE;

#ifdef __PORT_DBG__
  if ((type == MAILBOX) && (access == READ )) {
    errno = EPORT_INCOMPAT_MESSAGE;
    return -1;
  }
  if ((type == STICK ) && (access == WRITE)) {
    errno = EPORT_INCOMPAT_MESSAGE;
    return -1;
  }
#endif
  /*
     First get a port interface and fill it partially.
  */
  pi = getfreeint();
  if (pi == -1) return -1;
  /* port_int[pi].proc_id = exec_shadow; */
  port_int[pi].access = access;
  port_int[pi].dim_mes = dim_mes;
  port_int[pi].next = NIL;
  /*
     Mutual exclusion on the hash table.
  */
  sem_wait(&hash_mutex);
  /*
     Look up the descriptor: if the port has already been opened we exit
     the loop with flag = TRUE, create = FALSE, 'i' pointing to the hash
     entry, and htable[i].blocked == 0.  In every other case the port
     has not been opened yet, so we will have to block.
  */
  i = hash_fun(name);
  while (!flag) {
    /* free entry: we must claim it and wait for the create */
    if (htable[i].valid == FALSE) {
      flag = TRUE;
      create = TRUE;
    }
    /* found it (but we do not know yet whether it has been opened) */
    else if (strcmp(htable[i].name, name) == 0) flag = TRUE;
    /* probe the next entry */
    else {
      i = (i+1) % MAX_HASH_ENTRY;
      letti ++;
    }
#ifdef __PORT_DBG__
    /* the whole array was scanned without finding anything */
    if (letti > MAX_HASH_ENTRY) {
      errno = EPORT_NO_MORE_HASHENTRY;
      sem_post(&hash_mutex);
      return -1;
    }
#endif
  }
  /*
     If we must wait for the port to be opened (create = TRUE) we block
     on the htable entry's semaphore; to avoid keeping unused semaphores
     around, it is created on the spot.
  */
  if (create == TRUE) {
    htable[i].valid = TRUE;
    htable[i].blocked = 1;
    strcpy(htable[i].name, name);
    sem_init(&htable[i].sem, 0, 0);
    sem_post(&hash_mutex);
    sem_xwait(&htable[i].sem,1,BLOCK);
  }
  /*
     If instead another connect has already blocked on it, we block too.
     In any case release the hash mutex, since everything we had to do
     with the hash table is done.
  */
  else {
    if (htable[i].blocked > 0) {
      htable[i].blocked++;
      sem_post(&hash_mutex);
      sem_xwait(&htable[i].sem,1,BLOCK);
    }
    else sem_post(&hash_mutex);
  }
  /*
     Error checks.
  */
  pd = htable[i].port_index;
#ifdef __PORT_DBG__
  if (type != port_des[pd].type) {
    errno = EPORT_UNSUPPORTED_ACC;
    return -1;
  }
  if ((type == STICK) && (dim_mes != port_des[pd].dim_block)) {
    errno = EPORT_WRONG_OP;
    return -1;
  }
  if ((type != STICK) && (port_des[pd].dim_block % dim_mes) != 0) {
    errno = EPORT_WRONG_OP;
    return -1;
  }
#endif

  sem_wait(&hash_mutex);
  pn = htable[i].port_int;
  if (pn != NIL) {
#ifdef __PORT_DBG__
    /* STREAM ports admit a single reader/writer pair */
    if (type == STREAM) {
      errno = EPORT_WRONG_TYPE;
      sem_post(&hash_mutex);
      return -1;
    }
    if (dim_mes != port_int[pn].dim_mes) {
      errno = EPORT_WRONG_OP;
      sem_post(&hash_mutex);
      return -1;
    }
#endif
    /* link this interface at the head of the entry's interface list */
    port_int[pi].next = htable[i].port_int;
    htable[i].port_int = pi;
  }
  else htable[i].port_int = pi;
  sem_post(&hash_mutex);
  port_int[pi].h_index = i;
  port_int[pi].port_index = pd;
  port_int[pi].valid = TRUE;
  return(pi);
}
 
/*----------------------------------------------------------------------*/
/* port_delete() : inversa di port_open, libera tutto */
/*----------------------------------------------------------------------*/
/* Inverse of port_create: frees the buffer, destroys the semaphores,
   releases the port descriptor and the caller's interface, and
   invalidates the hash entry.
   NOTE(review): 'htable[i].port_int = pp->next' assumes 'pi' is the
   head of the entry's interface list — verify delete is only invoked
   while no other interface precedes it. */
void port_delete(PORT pi)
{
  int i;
  struct port_ker *pd;
  struct port_com *pp;
  SYS_FLAGS f;

  pp = &port_int[pi];
  sem_wait(&hash_mutex);
  i = pp->h_index;
  pd = &port_des[htable[i].port_index];
  pd->valid = FALSE;
  sem_destroy(&pd->mutex);
  sem_destroy(&pd->pieno);
  sem_destroy(&pd->vuoto);
  f = kern_fsave();
  kern_free(pd->mem_block, pd->dim_block);
  kern_frestore(f);

  ungetdes(htable[i].port_index);
  pp->valid = FALSE;
  htable[i].port_int = pp->next;
  ungetint(pi);
  htable[i].valid = FALSE;
  sem_post(&hash_mutex);
}
 
/*----------------------------------------------------------------------*/
/* port_disconnect() : libera l'interfaccia di porta */
/*----------------------------------------------------------------------*/
/* Releases the port interface obtained with port_connect.
   NOTE(review): the list update assumes 'pi' is at the head of the
   hash entry's interface list; disconnecting a non-head interface
   would detach the wrong chain — confirm intended usage. */
void port_disconnect(PORT pi)
{
  sem_wait(&hash_mutex);
  if (htable[port_int[pi].h_index].valid == TRUE)
    htable[port_int[pi].h_index].port_int = port_int[pi].next;
  port_int[pi].valid = FALSE;
  ungetint(pi);
  sem_post(&hash_mutex);
}
 
/*----------------------------------------------------------------------*/
/* port_send() : Invia un messaggio alla porta */
/*----------------------------------------------------------------------*/
/* Sends one message (pp->dim_mes bytes read from 'msg') to the port.
   'wait' is forwarded to sem_xwait (blocking/non-blocking behavior).
   Returns TRUE on success, FALSE when the non-blocking wait failed;
   with __PORT_DBG__, -1 and errno on misuse. */
WORD port_send(PORT pi, void *msg, BYTE wait)
{
  struct port_ker *pd;
  struct port_com *pp;

  pp = &(port_int[pi]);
  pd = &(port_des[pp->port_index]);
#ifdef __PORT_DBG__
  if (pp->access == READ) {
    errno = EPORT_WRONG_OP;
    return -1;
  }
  if (!pd->valid) {
    errno = EPORT_INVALID_DESCR;
    return -1;
  }

#endif

  /* STICK: buffer mutex only; STREAM: reserve free space only
     (single writer); MAILBOX: reserve free space, then take the mutex */
  if (pd->type == STICK) sem_wait(&pd->mutex);
  else if (pd->type == STREAM) {
    if (sem_xwait(&pd->pieno,pp->dim_mes,wait)) return(FALSE);
  }
  else {
    if (sem_xwait(&pd->pieno, pp->dim_mes,wait)) return(FALSE);
    sem_wait(&pd->mutex);
  }

  memcpy(pd->head, msg, pp->dim_mes);

  /* advance the write pointer, wrapping around the circular buffer */
  pd->head += pp->dim_mes;
  if (pd->head >= (pd->mem_block + pd->dim_block))
    pd->head -= pd->dim_block;

  /* release in the mirror order: signal readable bytes ('vuoto'),
     then drop the mutex where taken */
  if (pd->type == STICK) sem_post(&pd->mutex);
  else if (pd->type == STREAM) sem_xpost(&pd->vuoto, pp->dim_mes);
  else {
    sem_xpost(&pd->vuoto, pp->dim_mes);
    sem_post(&pd->mutex);
  }
  return(TRUE);
}
 
/*----------------------------------------------------------------------*/
/* port_receive() : Riceve un messaggio dalla porta */
/*----------------------------------------------------------------------*/
/* Receives one message (pp->dim_mes bytes copied into 'msg') from the
   port.  'wait' is forwarded to sem_xwait (blocking/non-blocking).
   Returns TRUE on success, FALSE when the non-blocking wait failed;
   with __PORT_DBG__, -1 and errno on misuse. */
WORD port_receive(PORT pi,void *msg,BYTE wait)
{
  struct port_ker *pd;
  struct port_com *pp;

  pp = &(port_int[pi]);
  pd = &(port_des[pp->port_index]);
#ifdef __PORT_DBG__
  if (pp->access == WRITE) {
    errno = EPORT_WRONG_OP;
    return -1;
  }
  if (!pd->valid) {
    errno = EPORT_INVALID_DESCR;
    return -1;
  }
#endif

  /* STICK: buffer mutex only; STREAM: wait for readable bytes only
     (single reader); MAILBOX: wait for bytes, then take the mutex */
  if (pd->type == STICK) sem_wait(&pd->mutex);
  else if (pd->type == STREAM) {
    if (sem_xwait(&pd->vuoto,pp->dim_mes,wait)) return(FALSE);
  }
  else {
    if (sem_xwait(&pd->vuoto,pp->dim_mes,wait)) return(FALSE);
    sem_wait(&pd->mutex);
  }

  memcpy(msg, pd->tail, pp->dim_mes);

  /* advance the read pointer, wrapping around the circular buffer */
  pd->tail += pp->dim_mes;
  if (pd->tail >= (pd->mem_block + pd->dim_block))
    pd->tail -= pd->dim_block;

  /* release in the mirror order: signal freed space ('pieno'),
     then drop the mutex where taken */
  if (pd->type == STICK) sem_post(&pd->mutex);
  else if (pd->type == STREAM) sem_xpost(&pd->pieno, pp->dim_mes);
  else {
    sem_xpost(&pd->pieno, pp->dim_mes);
    sem_post(&pd->mutex);
  }
  return(TRUE);
}
 
#ifdef __PORT_DBG__
 
/* Debug helper: dumps the state of every valid port interface.
   NOTE(review): pd->vuoto / pd->pieno are sem_t objects passed to a
   %d conversion — this only works if sem_t is integer-compatible on
   this platform; verify against the sem module's definition. */
void print_port(void)
{
  int i;
  struct port_ker *pd;
  struct port_com *pp;
  /*
  kern_printf("Hash Table :\n");
  for (i=0; i<MAX_HASH_ENTRY; i++)
    kern_printf("%d\tvl: %d\tbl: %d\tpd: %d\t%s\n", i,
                htable[i].valid, htable[i].blocked, htable[i].port_index,
                htable[i].name);
  */
  kern_printf("Port des :\n");
  kern_printf("Free port des : %d\n", freeportdes);
  for (i=0; i<MAX_PORT_INT; i++)
    if (port_int[i].valid) {
      pp = &port_int[i];
      pd = &port_des[pp->port_index];
      kern_printf("%d %s vt: %d pn: %d\n",i,htable[pp->h_index].name,
                  pd->vuoto,pd->pieno);
    }
  /*
  kern_printf("Port int :\n");
  kern_printf("Free port int : %d\n", freeportint);
  for (i=0; i<MAX_PORT_INT; i++)
    kern_printf("%d vl : %d dm : %d port_index : %d proc_id : %d\n", i,
                port_int[i].valid, port_int[i].dim_mes,
                port_int[i].port_index, port_int[i].proc_id);
  */
}
 
/* Debug helper: prints the state of a single port interface.
   BUG FIX: the old code called kern_printf(msg, "…", …) with 'msg' an
   UNINITIALIZED char[80] used as the format string (an sprintf-style
   call left half-converted) — undefined behavior and a format-string
   hazard.  Pass the format string directly and drop the buffer. */
void port_write(PORT p)
{
  struct port_ker *pd;
  struct port_com *pp;

  pp = &port_int[p];
  pd = &port_des[pp->port_index];

  /* NOTE(review): pd->vuoto / pd->pieno are sem_t passed to %d, as in
     print_port() — verify sem_t is integer-compatible here. */
  kern_printf("%d pd: %d vt: %d pn: %d ", p, pp->port_index,
              pd->vuoto, pd->pieno);
}
 
#endif
/shark/trunk/modules/hartport/hartport.his
0,0 → 1,12
1.2
32 bit testing!! We use check_addr() to validate the buffers...
This is a very dirty bug-avoider :-)
Using 16 bits, check_addr() is a dummy function.
 
1.3
System calls have been modified, changing slightly their
semantic.
1.4
Exception handling has been integrated!
 
/shark/trunk/modules/hartport/subdir.mk
0,0 → 1,0
OBJS += hartport/hartport.o
/shark/trunk/modules/srp/srp/srp.h
0,0 → 1,121
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: srp.h,v 1.1 2005-02-25 10:40:58 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:40:58 $
------------
 
This file contains the Stack Resource Policy (SRP) Protocol
 
Title:
SRP (Stack Resource Policy)
 
Resource Models Accepted:
None
 
Description:
This module implement the Stack Resource policy using the shadow field
of the task descriptor. No difference is made upon the task model of the
tasks that use SRP mutexes.
 
A SRP mutex is created passing the SRP_mutexattr structure to mutex_init.
 
In effect, this module doesn't work correctly if it is
used, for example, with a Round Robin Module; in this case we
can have a task B that preempts A, but when B finishes his timeslice, A
can be scheduled by the RR module, and the SRP can't block it, because
the SRP module can not control preemptions (it works only using the shadow
field at lock and unlock time!!!).
Note that this problem does not affect the EDF, RM & co. algorithms... because
if B preempts A, A will never preempt B...
 
A task that want to use the SRP protocol MUST declare it using a
SRP_RES_MODEL in the task_create. Only the first SRP_RES_MODEL is
considered.
 
A task that want to use a SRP mutex have to declare it with the
SRP_usemutex function as last parameter of a task_create call, AFTER
the specification of the preemption level.
 
Exceptions raised:
XMUTEX_OWNER_KILLED
This exception is raised when a task ends and it owns one or more
mutexes
 
XSRP_INVALID_LOCK
This exception is raised when a task tries to lock an SRP mutex but
it does not have the privilege.
 
Restrictions & special features:
- This module is NOT Posix compliant
- This module can manage any number of SRP mutexes.
- If a task ends (because it reaches the end of the body or because it
is killed by someone) and it owns some mutex, an exception is raised.
- if a mutex unlock is called on a mutex not previously
locked or previously locked by another task an exception is raised
- A SRP mutex can not be statically allocated
- The module is incompatible with the primitive TASK_JOIN, so the tasks
that uses SRP can NOT call task_join.
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
 
#ifndef __SRP_H__
#define __SRP_H__
 
#include <kernel/model.h>
#include <kernel/descr.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
RLEVEL SRP_register_module(void);
 
/*+ Return the RES_MODEL embedded in an SRP mutex descriptor, so the
    mutex can be passed directly to task_create as a resource model
    (must follow the SRP_RES_MODEL in the model list).
    FIX: dropped the stray ';' after the function body — a lone
    semicolon at file scope is not valid ISO C90. +*/
extern __inline__ RES_MODEL *SRP_usemutex(mutex_t *m) {
  return (RES_MODEL *)m->opt;
}
 
__END_DECLS
#endif
/shark/trunk/modules/srp/srp.c
0,0 → 1,778
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: srp.c,v 1.1 2005-02-25 10:40:58 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:40:58 $
------------
 
Stack Resource Policy. see srp.h for general details...
 
 
HOW the shadows are managed in this module
------------------------------------------
 
All the task that use SRP are inserted in an ordered list, called tasklist.
 
when a task lock a mutex and change the system ceiling, all the shadows
of the tasks with preemption level <= are set to the locking task, and
viceversa when a mutex is unlocked.
 
The real algorithm is slightly different: for example consider a task set
of 8 tasks. We represent each task here as (PID, shadow, preemption level).
 
There is also a field, current, used to scan the tasklist.
 
When the system starts, the situation is as follows:
 
system ceiling = 0, current = NIL
(a,a,1) (b,b,2) (c,c,2) (d,d,2) (e,e,3) (f,f,4) (g,g,4) (h,h,5)
 
for example, task a is scheduled, and lock a mutex that cause the system
ceiling to become 2. The situation will be the following:
 
system ceiling = 2, current = d
(a,a,1) (b,a,2) (c,a,2) (d,a,2) (e,e,3) (f,f,4) (g,g,4) (h,h,5)
 
Now suppose that task f preempts on task a. (no change to the shadows)
 
Then the task f locks a mutex and the system ceiling become 4. The shadows
will be set as follows:
 
system ceiling = 4, current = g
(a,f,1) (b,a,2) (c,a,2) (d,a,2) (e,f,3) (f,f,4) (g,f,4) (h,h,5)
 
The system maintains a stack of the locked mutexes. each mutex has in the
descriptor the space for implementing a stack, useful in the unlock()
function to undo the modify done whith the last lock()...
 
This approach minimizes the number of shadows to be set, so minimizes
the complexity of the lock/unlock operations.
 
Unfortunately, it creates a tree in the shadows (i.e., when sys_ceiling=4,
task c points to task a that points to task f, and so on....). This may
cause a performance a little worse with respect to a one-jump shadow set.
This is not a big problem because when a task is preempted it is very
difficult (if not impossible!) that it may be rescheduled before the end
of another high priority task.
 
Dynamic creation and termination of tasks
-----------------------------------------
This module allows dynamic creation and termination of tasks.
 
To be correct the system have to really activate the task only when the
system ceiling is 0.
 
To implement this there is a list, the lobbylist, that contains that tasks.
 
When a task is created and the system ceiling is > 0, the task is inserted
on the top of the list, and his activation are frozen via a call to
task_block_activations.
 
When the system_ceiling returns to 0, the lobby list is purged and for each
task in that list the task_unblock_activations is called. if the function
return a number >0, a task call task_activate is done on the task.
 
the tasks are inserted into the lobby list using only the next field.
 
 
 
When a mutex is destroyed or a task is created or killed, the ceiling
has to be recalculated. The recalc is made when the system ceiling goes down
to 0. To know which mutexes need the operation, they are
inserted into the srp_recalc list.
 
 
The SRP_usemutex function (see srp.h) is used to declare the used mutexes
of a task. Why this and how it works?
In this way, a task can insert directly the list of the mutexes that it uses
without allocating others resource models, but using directly the mutexes
that MUST be (in any case) initialized before the task creation...
This is done in a simple way, inheriting the SRP_mutex_t from the RES_MODEL.
When a task registers a mutex, the SRP module receive the pointer to that
mutex, so it can do all the stuffs with the needed data structures.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <srp/srp/srp.h>
 
#include <ll/ll.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
typedef struct SRP_mutexstruct_t SRP_mutex_t;
 
/* The SRP resource level descriptor */
typedef struct {
mutex_resource_des m; /*+ the mutex interface +*/
 
int nlocked[MAX_PROC]; /*+ how many mutex a task currently locks +*/
 
struct {
DWORD preempt;
PID next;
PID prev;
} proc_preempt[MAX_PROC]; /*+ the preemption level of each task in the
system; if a task don't use SRP its value
is 0; if a task use SRP the field preempt
is != 0 and the item is enqueued in the
ordered list tasklist +*/
 
PID tasklist; /*+ A list of all the task that can use SRP,
ordered by the preemption level of each
task. +*/
PID current; /*+ A pointer used to set shadows +*/
 
PID lobbylist; /*+ A list for all the new tasks created when
the system ceiling is != 0. These tasks
will be inserted into tasklist when the
ceiling return to 0. +*/
SRP_mutex_t *srpstack; /*+ this is the stack where we store the system
ceiling +*/
 
SRP_mutex_t *srprecalc; /*+ the list of all mutexes that need a ceiling
recalc +*/
 
SRP_mutex_t *srplist; /*+ an unordered list of all created SRP
mutexes +*/
 
} SRP_mutex_resource_des;
 
 
/* this is the structure normally pointed by the opt field in the
mutex_t structure */
struct SRP_mutexstruct_t {
RES_MODEL r; /*+ This little trick make possible the use of
SRP_usemutex +*/
 
/* because the number of mutexes that can be created is not limited,
the stack normally used to store the system ceiling is implemented
through these two fields in the mutex descriptor. Note that the mutex
are mono-resource, so when we alloc space for a mutex descriptor we
alloc also the needed space for the stack... */
DWORD sysceiling; /*+ The system ceiling; this field contains
- a meaningless value if the struct is not inserted
into the srpstack
- the system ceiling if the struct is on the top of
the srpstack
- a "frozen" system ceiling if the struct is not on
the top of the srpstack.
when a mutex is locked, it is inserted into srpstack
updating the system ceiling automatically
+*/
SRP_mutex_t *srpstack_next; /*+ the next entry on the srpstack +*/
 
 
 
BYTE use[MAX_PROC]; /*+ use[p]==1 if the task p declared that it uses the
mutex +*/
 
DWORD ceiling; /*+ max premption level of the tasks that use the mutex +*/
 
PID owner; /*+ the task that owns the mutex, NIL otherwise +*/
 
int in_recalc_list; /*+ a flag: 1 if the mutex is in the recalc list +*/
SRP_mutex_t *srprecalc_next; /*+ the next item in the recalc list +*/
SRP_mutex_t *srprecalc_prev; /*+ the prev item; useful in extractions +*/
 
SRP_mutex_t *srplist_next; /*+ the next item in the srplist list +*/
SRP_mutex_t *srplist_prev; /*+ the prev item; useful in extractions+*/
};
 
 
 
 
 
 
 
 
 
 
 
/* -----------------------------------------------------------------------
LISTS HANDLING
----------------------------------------------------------------------- */
 
/*+ Insert task t into tasklist, keeping the list ordered by
    non-decreasing preemption level (stable: t is placed after the
    entries with the same level). +*/
static void SRP_insert_tasklist(SRP_mutex_resource_des *m, PID t)
{
  PID before = NIL;
  PID after  = m->tasklist;
  DWORD level = m->proc_preempt[t].preempt;

  /* skip every entry whose preemption level is <= t's level */
  while (after != NIL && m->proc_preempt[after].preempt <= level) {
    before = after;
    after  = m->proc_preempt[after].next;
  }

  /* splice t between 'before' and 'after' */
  if (before == NIL)
    m->tasklist = t;
  else
    m->proc_preempt[before].next = t;

  if (after != NIL)
    m->proc_preempt[after].prev = t;

  m->proc_preempt[t].next = after;
  m->proc_preempt[t].prev = before;
}
 
/*+ Unlink task i from the doubly-linked tasklist. +*/
static void SRP_extract_tasklist(SRP_mutex_resource_des *m, PID i)
{
  PID before = m->proc_preempt[i].prev;
  PID after  = m->proc_preempt[i].next;

  if (before != NIL)
    m->proc_preempt[before].next = after;
  else
    m->tasklist = after;          /* i was the head */

  if (after != NIL)
    m->proc_preempt[after].prev = before;
}
 
 
/*+ Push task p on the front of the lobbylist (unordered).
    BUG FIX: the old code unconditionally wrote
    m->proc_preempt[m->lobbylist].prev; when the list was empty this
    indexed the array with NIL — an out-of-bounds access. +*/
static void SRP_insertfirst_lobbylist(SRP_mutex_resource_des *m, PID p)
{
  m->proc_preempt[p].next = m->lobbylist;
  m->proc_preempt[p].prev = NIL;

  if (m->lobbylist != NIL)
    m->proc_preempt[m->lobbylist].prev = p;
  m->lobbylist = p;
}
 
/*+ Pop and return the head of the lobbylist.
    Precondition: the list must NOT be empty. +*/
static __inline__ PID SRP_extractfirst_lobbylist(SRP_mutex_resource_des *m)
{
  PID head = m->lobbylist;

  m->lobbylist = m->proc_preempt[head].next;
  return head;
}
 
 
 
/*+ Queue mut for a deferred ceiling recalculation by pushing it on the
    front of the recalc list — but only if it is not already queued. +*/
static void SRP_insertfirst_recalclist(SRP_mutex_resource_des *m,
				       SRP_mutex_t *mut)
{
  if (mut->in_recalc_list)
    return;                          /* already queued: nothing to do */

  mut->srprecalc_prev = NULL;
  mut->srprecalc_next = m->srprecalc;
  if (m->srprecalc != NULL)
    m->srprecalc->srprecalc_prev = mut;
  m->srprecalc = mut;

  mut->in_recalc_list = 1;
}
 
/*+ Unlink mut from the ceiling-recalc list.
    BUG FIX: also clear mut->in_recalc_list (the field is documented as
    "1 if the mutex is in the recalc list").  The old code left the flag
    set, so after the first extraction SRP_insertfirst_recalclist() would
    silently refuse to ever queue the mutex again, and later ceiling
    recalculations were lost. +*/
static void SRP_extract_recalclist(SRP_mutex_resource_des *m,
				   SRP_mutex_t *mut)
{
  SRP_mutex_t *p, *q;

  p = mut->srprecalc_prev;
  q = mut->srprecalc_next;

  if (p)
    p->srprecalc_next = q;
  else
    m->srprecalc = q;              /* mut was the head */

  if (q)
    q->srprecalc_prev = p;

  mut->in_recalc_list = 0;
}
 
/*+ Unlink mut from the global list of created SRP mutexes. +*/
static void SRP_extract_srplist(SRP_mutex_resource_des *m,
				SRP_mutex_t *mut)
{
  SRP_mutex_t *before = mut->srplist_prev;
  SRP_mutex_t *after  = mut->srplist_next;

  if (before != NULL)
    before->srplist_next = after;
  else
    m->srplist = after;            /* mut was the head */

  if (after != NULL)
    after->srplist_prev = before;
}
 
 
 
/* -----------------------------------------------------------------------
End of LISTS HANDLING
----------------------------------------------------------------------- */
 
 
 
 
/*+ Return the current system ceiling: the value stored in the mutex on
    top of the srpstack, or 0 when no SRP mutex is locked. +*/
static __inline__ DWORD sysceiling(SRP_mutex_resource_des *m)
{
  return m->srpstack ? m->srpstack->sysceiling : 0;
}
 
/*+ Recompute mut->ceiling as the maximum preemption level among the
    tasks that declared use of the mutex.
    FIX: accumulate in a DWORD instead of an int — the old int local was
    compared against the unsigned DWORD proc_preempt[].preempt and then
    stored into the DWORD mut->ceiling, a signed/unsigned mismatch. +*/
static void SRP_recalc_ceiling_value(SRP_mutex_resource_des *m,
				     SRP_mutex_t *mut)
{
  PID p;
  DWORD ceiling = 0;

  for (p = 0; p < MAX_PROC; p++)
    if (mut->use[p] && ceiling < m->proc_preempt[p].preempt)
      ceiling = m->proc_preempt[p].preempt;

  mut->ceiling = ceiling;
}
 
 
/*+ Resource-model registration hook, called at task creation time.
    Two resource classes are accepted:
    - SRP_RCLASS  (SRP_RES_MODEL): declares the task's preemption level;
      only the first such model per task is considered;
    - SRP2_RCLASS (an SRP mutex passed via SRP_usemutex): declares that
      task p will use that mutex, so the mutex ceiling must be updated.
    Returns 0 on success, -1 on wrong level/class or a duplicate mutex
    registration. +*/
static int SRP_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  SRP_mutex_resource_des *m = (SRP_mutex_resource_des *)(resource_table[l]);

  if (r->level && r->level !=l)
    return -1;

  if (r->rclass == SRP_RCLASS) {
    /* SRP_RES_MODEL resource model */
    // kern_printf("!%d %d",((SRP_RES_MODEL *)r)->preempt,p);

    if (m->proc_preempt[p].preempt == 0) {
      /* only the first SRP_RES_MODEL is considered */
      SRP_RES_MODEL *srp = (SRP_RES_MODEL *)r;

      m->proc_preempt[p].preempt = srp->preempt;
      // kern_printf("res_register: preempt=%d, p=%d\n",srp->preempt,p);

      /* insert the new task in the ordered list tasklist or in the lobby
         list */
      if (m->srpstack) {
        /* system ceiling != 0: park the task in the lobby list and
           freeze its activations until the ceiling returns to 0 */
        SRP_insertfirst_lobbylist(m,p);
        /* we have also to freeze the activations... */
        task_block_activation(p);
        // kern_printf("LOBBY!!!");
      }
      else
        SRP_insert_tasklist(m,p);
    }

    m->nlocked[p] = 0;
    return 0;
  }
  else if (r->rclass == SRP2_RCLASS) {
    /* a mutex passed via SRP_useres() */
    SRP_mutex_t *mut = (SRP_mutex_t *)r;

    if (mut->use[p])
      /* the mutex is already registered, do nothing! */
      return -1;

    /* register the mutex for the task */
    mut->use[p] = 1;

    if (m->srpstack)
      /* system ceiling != 0: defer the ceiling update until it drops
         back to 0 (see SRP_unlock) */
      SRP_insertfirst_recalclist(m,mut);
    else {
      /* we recalc the mutex ceiling */
      if (mut->ceiling < m->proc_preempt[p].preempt)
        mut->ceiling = m->proc_preempt[p].preempt;

    }
    return 0;
  }
  else
    return -1;
}
 
/*+ Called when task p terminates or is killed.  Undoes the task's SRP
    registrations: raises XMUTEX_OWNER_KILLED if the task still holds a
    mutex, removes p from every mutex's use[] set (queueing deferred
    ceiling recalcs while the system ceiling is != 0), fixes the
    'current' scan pointer and unlinks p from the tasklist. +*/
static void SRP_res_detach(RLEVEL l, PID p)
{
  SRP_mutex_resource_des *m = (SRP_mutex_resource_des *)(resource_table[l]);
  SRP_mutex_t *mut;

  /* a task that never declared a preemption level doesn't use SRP */
  if (m->proc_preempt[p].preempt == 0)
    return;

  /* dying while owning one or more mutexes is an error */
  if (m->nlocked[p])
    kern_raise(XMUTEX_OWNER_KILLED, p);
  else
    m->nlocked[p] = 0;

  for (mut = m->srplist; mut; mut = mut->srplist_next)
  {
    if (!mut->use[p])
      /* the mutex is not registered, do nothing! */
      continue;

    /* unregister the mutex for the task */
    mut->use[p] = 0;

    if (m->srpstack)
      /* system ceiling != 0: defer the ceiling recalc */
      SRP_insertfirst_recalclist(m,mut);
    else
      SRP_recalc_ceiling_value(m,mut);
  }

  /* check if current points to the task being killed */
  if (m->current == p)
    m->current = m->proc_preempt[m->current].prev;

  /* remove the task from the tasklist */
  SRP_extract_tasklist(m, p);
}
 
/*+ mutex_init for the SRP level: allocates an SRP_mutex_t descriptor,
    initializes it and links it into the global srplist.
    Returns 0 on success, -1 on a wrong mutexattr class, ENOMEM when the
    allocation fails.  NOTE(review): the positive ENOMEM return differs
    from the -1 used for the class check — confirm callers handle both. +*/
static int SRP_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  SRP_mutex_resource_des *lev = (SRP_mutex_resource_des *)(resource_table[l]);
  SRP_mutex_t *p;
  PID x;

  if (a->mclass != SRP_MCLASS)
    return -1;

  p = (SRP_mutex_t *) kern_alloc(sizeof(SRP_mutex_t));

  /* control if there is enough memory; no control on init on a
     non- destroyed mutex */

  if (!p)
    return (ENOMEM);

  /* the embedded RES_MODEL is what makes SRP_usemutex possible */
  res_default_model(p->r, SRP2_RCLASS);
  p->sysceiling = 0; /* dummy value :-) */
  p->srpstack_next = NULL; /* dummy value :-) */

  /* no task has declared use of this mutex yet */
  for (x = 0; x < MAX_PROC; x++)
    p->use[x] = 0;

  p->ceiling = 0;
  p->owner = NIL;

  p->in_recalc_list = 0;
  p->srprecalc_next = NULL; /* dummy value :-) */
  p->srprecalc_prev = NULL; /* dummy value :-) */

  /* link the new mutex at the head of srplist */
  p->srplist_next = lev->srplist;
  p->srplist_prev = NULL;
  if (lev->srplist) lev->srplist->srplist_prev = p;
  lev->srplist = p;

  m->mutexlevel = l;
  m->opt = (void *)p;

  return 0;
}
 
 
/*+ mutex_destroy for the SRP level: refuses to destroy a locked mutex
    (EBUSY), otherwise unlinks the descriptor from the recalc and global
    lists and frees it.
    BUG FIX: the old code dereferenced mut (mut->owner) BEFORE checking
    m->opt for NULL; SRP_lock and SRP_unlock treat a NULL m->opt as "not
    initialized" and return EINVAL — do the same here. +*/
static int SRP_destroy(RLEVEL l, mutex_t *m)
{
  SRP_mutex_resource_des *lev = (SRP_mutex_resource_des *)(resource_table[l]);
  SRP_mutex_t *mut;
  SYS_FLAGS f;

  mut = m->opt;

  /* an uninitialized (or already destroyed) mutex cannot be destroyed */
  if (!mut)
    return (EINVAL);

  if (mut->owner != NIL)
    return (EBUSY);

  f = kern_fsave();

  /* the mutex isn't in the srpstack, because it is not busy */

  /* if queued for a deferred ceiling recalc, drop it from that list */
  if (mut->in_recalc_list)
    SRP_extract_recalclist(lev, mut);

  /* extract from srplist */
  SRP_extract_srplist(lev, mut);

  kern_free(mut, sizeof(SRP_mutex_t));
  m->opt = NULL;

  kern_frestore(f);

  return 0;
}
 
/*+ mutex_lock for the SRP level (also installed as trylock: SRP never
    blocks on lock).  Verifies that the caller declared both SRP and
    this mutex, marks the mutex locked, pushes it on the srpstack with
    the updated system ceiling and, if the ceiling grew, redirects the
    shadow pointer of every task with preemption level <= new ceiling
    to the locking task (see the file-head comment for the algorithm).
    Returns 0 on success, EINVAL/EDEADLK on misuse (EINVAL also raises
    XSRP_INVALID_LOCK). +*/
static int SRP_lock(RLEVEL l, mutex_t *m)
{
  SRP_mutex_resource_des *lev = (SRP_mutex_resource_des *)(resource_table[l]);
  SRP_mutex_t *mut;
  DWORD oldsysceiling;
  SYS_FLAGS f;

  f = kern_fsave();

  mut = (SRP_mutex_t *)m->opt;
  if (!mut) {
    /* if the mutex is not initialized */
    kern_frestore(f);
    return (EINVAL);
  }

  if (mut->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_frestore(f);
    return (EDEADLK);
  }

  /* reject: mutex not declared by the task, task has no preemption
     level (doesn't use SRP), or mutex already owned by someone else */
  if (!mut->use[exec_shadow] ||
      lev->proc_preempt[exec_shadow].preempt == 0 ||
      mut->owner != NIL)
  {
    // kern_printf("SRP:lev =%d owner=%d use=%d preempt=%d exec_shadow=%d\n",
    //             lev, mut->owner,
    //             mut->use[exec_shadow],
    //             lev->proc_preempt[exec_shadow].preempt,exec_shadow);
    kern_raise(XSRP_INVALID_LOCK, exec_shadow);
    kern_frestore(f);
    return (EINVAL);
  }

  /* we know that:
     - the task use the SRP protocol and the mutex that it wants to lock
     - the mutex is free
     => the task can lock now the mutex
  */

  lev->nlocked[exec_shadow]++;
  mut->owner = exec_shadow;

  oldsysceiling = sysceiling(lev);

  /* update the system ceiling */
  mut->sysceiling = (oldsysceiling>mut->ceiling) ?
    oldsysceiling : mut->ceiling;

  /* update the srpstack */
  mut->srpstack_next = lev->srpstack;
  lev->srpstack = mut;

  /* if the system ceiling is changed we have to change the shadows
     Note that mut->sysceiling is the NEW sysceiling */
  if (oldsysceiling != mut->sysceiling) {
    /* we set the shadow of the last task that did a lock */
    if (mut->srpstack_next)
      proc_table[mut->srpstack_next->owner].shadow = exec_shadow;

    /* now we set the shadow field of the remainig tasks */

    /* first, get the first task to manage */
    if (lev->current == NIL)
      lev->current = lev->tasklist;
    else
      /* Note that because the sysceiling is increased by the lock, currrent
         can't be at the end of the tasklist, so the operation is legal */
      lev->current = lev->proc_preempt[lev->current].next;

    /* walk forward through the ordered tasklist, pointing every task
       with preemption level <= the new ceiling at the locking task */
    for (;;) {
      PID x; /* for readablenesss only :-) */

      proc_table[lev->current].shadow = exec_shadow;

      /* test if we have to touch the next task in the tasklist */
      x = lev->proc_preempt[lev->current].next;
      if (x == NIL ||
          lev->proc_preempt[x].preempt > mut->sysceiling)
        break;

      /* look at the next task ! */
      lev->current = lev->proc_preempt[lev->current].next;
    }
  }

  kern_frestore(f);

  return 0;
}
 
/* SRP_trylock is equal to SRP_lock because the SRP_lock don't block !!! */
 
/*+ mutex_unlock for the SRP level.  Enforces stack-ordered unlocking
    (the mutex must be on top of the srpstack), pops it, restores the
    shadow pointers of the tasks above the new system ceiling and, when
    the stack becomes empty, flushes the lobby list (re-enabling the
    frozen activations) and performs the deferred ceiling recalcs,
    then reschedules.  Returns 0, or EPERM/EINVAL on misuse. +*/
static int SRP_unlock(RLEVEL l, mutex_t *m)
{
  SRP_mutex_resource_des *lev;
  SRP_mutex_t *mut;
  DWORD newsysceiling;

  lev = (SRP_mutex_resource_des *)(resource_table[l]);
  mut = (SRP_mutex_t *)m->opt;

  if (!mut)
    return (EINVAL);

  if (mut->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    kern_sti();
    return (EPERM);
  }

  if (!lev->srpstack || lev->srpstack != mut) {
    /* the mutex is not the top of the stack!!! (erroneous nesting!) */
    kern_sti();
    return (EINVAL);
  }

  proc_table[exec_shadow].context = kern_context_save();

  /* the mutex is mine and it is at the top of the stack */
  lev->nlocked[exec_shadow]--;

  mut->owner = NIL;
  // kern_printf("Ûnlocked=%dÛ",lev->nlocked[exec_shadow]);

  /* extract the top of the stack */
  lev->srpstack = lev->srpstack->srpstack_next;

  /* if the sysceiling decreases, we update the shadows */
  newsysceiling = sysceiling(lev);
  if (newsysceiling < mut->sysceiling) {
    /* walk 'current' backwards, restoring each task's shadow to itself
       until we are back under the new ceiling */
    do {
      proc_table[lev->current].shadow = lev->current;
      lev->current = lev->proc_preempt[lev->current].prev;
    } while (lev->current != NIL &&
             lev->proc_preempt[lev->current].preempt > newsysceiling);

    if (lev->srpstack)
      /* this is the stack that owns the mutex with the current sysceiling*/
      proc_table[lev->srpstack->owner].shadow = lev->srpstack->owner;
  }

  /* if it is the last mutex in the stack, handle lobbylist and srprecalc */
  if (!lev->srpstack) {
    // kern_printf("UNLOBBY:");
    while (lev->lobbylist != NIL) {
      PID x = SRP_extractfirst_lobbylist(lev);
      // kern_printf("x=%d - ",x);
      SRP_insert_tasklist(lev, x);

      /* activate the task if it was activated while in lobby list! */
      if (task_unblock_activation(x)) {
        struct timespec t;
        LEVEL sl = proc_table[x].task_level;
        kern_gettime(&t);
        level_table[sl]->public_activate(sl,x,&t);
        // kern_printf("activate it!!!");
      }
    }

    /* perform the ceiling recalcs deferred while the stack was busy */
    while (lev->srprecalc) {
      SRP_recalc_ceiling_value(lev, lev->srprecalc);
      SRP_extract_recalclist(lev, lev->srprecalc);
    }
  }

  scheduler();
  TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}
 
/*+ Registration function: allocates a resource-level descriptor, fills
    in the mutex interface function pointers and initializes every SRP
    data structure to "empty".  Returns the registered resource level. +*/
RLEVEL SRP_register_module(void)
{
  RLEVEL l; /* the level that we register */
  SRP_mutex_resource_des *m; /* for readableness only */
  PID i; /* a counter */

  printk("SRP_register_module\n");

  /* request an entry in the level_table */
  l = resource_alloc_descriptor();

  /* alloc the space needed for the SRP_mutex_resource_des */
  m = (SRP_mutex_resource_des *)kern_alloc(sizeof(SRP_mutex_resource_des));

  /* update the level_table with the new entry */
  resource_table[l] = (resource_des *)m;

  /* fill the resource_des descriptor */
  m->m.r.rtype = MUTEX_RTYPE;
  m->m.r.res_register = SRP_res_register;
  m->m.r.res_detach = SRP_res_detach;

  /* fill the mutex_resource_des descriptor */
  m->m.init = SRP_init;
  m->m.destroy = SRP_destroy;
  m->m.lock = SRP_lock;
  m->m.trylock = SRP_lock; /* equal!!! (SRP_lock never blocks) */
  m->m.unlock = SRP_unlock;

  /* fill the SRP_mutex_resource_des descriptor:
     no task uses SRP yet, all lists start empty */
  for (i=0; i<MAX_PROC; i++) {
    m->nlocked[i]=0;
    m->proc_preempt[i].preempt = 0;
    m->proc_preempt[i].next = NIL;
    m->proc_preempt[i].prev = NIL;
  }

  m->tasklist = NIL;
  m->current = NIL;
  m->lobbylist = NIL;

  m->srpstack = NULL;
  m->srprecalc = NULL;
  m->srplist = NULL;

  return l;
}
 
/shark/trunk/modules/srp/subdir.mk
0,0 → 1,0
OBJS += srp/srp.o
/shark/trunk/modules/rr2/rr2/rr2.h
0,0 → 1,124
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: rr2.h,v 1.1 2005-02-25 10:40:58 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:40:58 $
------------
 
This file contains the scheduling module RR (Round Robin)
 
Title:
RR (Round Robin) version 2
 
Task Models Accepted:
NRT_TASK_MODEL - Non-Realtime Tasks
weight field is ignored
slice field is used to set the slice of a task, if it is !=0
policy field is ignored
inherit field is ignored
 
Description:
This module schedule his tasks following the classic round-robin
scheme. The default timeslice is given at registration time and is a
a per-task specification. The default timeslice is used if the slice
field in the NRT_TASK_MODEL is 0.
 
The module can SAVE or SKIP activations
There is another module, RR, that always SKIPS activations...
 
Exceptions raised:
XUNVALID_GUEST
This level doesn't support guests. When a guest operation
is called, the exception is raised.
 
Restrictions & special features:
- if specified, it creates at init time a task,
called "Main", attached to the function __init__().
- There must be only one module in the system that creates a task
attached to the function __init__().
- The level tries to guarantee that a task uses a "full" timeslice
before going to the queue tail. "full" means that a task can execute
a maximum time of slice+sys_tick due to the approx. done by
the Virtual Machine. If a task execute more time than the slice,
the next time it execute less...
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#ifndef __RR2_H__
#define __RR2_H__
 
#include <ll/ll.h>
#include <kernel/config.h>
#include <sys/types.h>
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
extern TASK __init__(void *arg);
 
 
 
/*+ Const: +*/
#define RR2_MINIMUM_SLICE 1000 /*+ Minimum Timeslice +*/
#define RR2_MAXIMUM_SLICE 500000 /*+ Maximum Timeslice +*/
 
#define RR2_MAIN_YES 1 /*+ The level creates the main +*/
#define RR2_MAIN_NO 0 /*+ The level doesn't create the main +*/
 
/*+ Registration function:
TIME slice the slice for the Round Robin queue
int createmain 1 if the level creates the main task 0 otherwise
struct multiboot_info *b used if createmain specified
returns the level number at which the module has been registered.
+*/
LEVEL RR2_register_level(TIME slice,
int createmain,
struct multiboot_info *mb);
 
__END_DECLS
#endif
/shark/trunk/modules/rr2/rr2.c
0,0 → 1,320
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: rr2.c,v 1.1 2005-02-25 10:40:58 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:40:58 $
------------
 
This file contains the scheduling module RR2 (Round Robin) version 2
 
Read rr2.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARR2ANTY; without even the implied waRR2anty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <rr2/rr2/rr2.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
/*+ Status used in the level +*/
#define RR2_READY MODULE_STATUS_BASE
 
/*+ the level redefinition for the Round Robin level +*/
typedef struct {
level_des l; /*+ the standard level descriptor +*/
 
int nact[MAX_PROC]; /*+ number of pending activations +*/
 
IQUEUE ready; /*+ the ready queue +*/
 
int slice; /*+ the level's time slice +*/
 
struct multiboot_info *multiboot; /*+ used if the level have to insert
the main task +*/
} RR2_level_des;
 
 
/* Choose the next task to run: the first task in the ready queue that
   still has some timeslice left.  Tasks found with an exhausted slice
   are recharged and rotated to the queue tail before looking further.
   This is not efficient but very fair :-) — a task that ran a long time
   through (shadow!) priority inheritance may be rotated several times. */
static PID RR2_public_scheduler(LEVEL l)
{
  RR2_level_des *lev = (RR2_level_des *)(level_table[l]);
  PID candidate;

  while ((candidate = iq_query_first(&lev->ready)) != -1) {
    if (proc_table[candidate].avail_time > 0)
      return candidate;                /* has slice left: run it */

    /* slice exhausted: recharge and move to the queue tail */
    proc_table[candidate].avail_time += proc_table[candidate].wcet;
    iq_extract(candidate, &lev->ready);
    iq_insertlast(candidate, &lev->ready);
  }

  return candidate;                    /* -1: nothing ready */
}
 
/*+ task creation hook: accepts only NRT_TASK_MODEL.  Sets the task's
    timeslice (the model's slice, or the level default when 0) into both
    avail_time and wcet, enables capacity control, and records the
    activation policy: nact[p]=0 means SAVE_ARRIVALS (count pending
    activations), nact[p]=-1 means skip them.
    Returns 0 on success, -1 if the model is not acceptable. +*/
static int RR2_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  RR2_level_des *lev = (RR2_level_des *)(level_table[l]);
  NRT_TASK_MODEL *nrt;

  if (m->pclass != NRT_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;
  nrt = (NRT_TASK_MODEL *)m;

  /* the task state is set at SLEEP by the general task_create
     the only thing to set remains the capacity stuffs that are set
     to the values passed in the model... */

  /* I used the wcet field because using wcet can account if a task
     consume more than the timeslice... */

  if (nrt->slice) {
    proc_table[p].avail_time = nrt->slice;
    proc_table[p].wcet = nrt->slice;
  }
  else {
    /* model slice is 0: fall back to the level's default timeslice */
    proc_table[p].avail_time = lev->slice;
    proc_table[p].wcet = lev->slice;
  }
  proc_table[p].control |= CONTROL_CAP;

  if (nrt->arrivals == SAVE_ARRIVALS)
    lev->nact[p] = 0;
  else
    lev->nact[p] = -1;

  return 0; /* OK */
}
 
/*+ dispatch hook: the generic kernel has chosen p to run; remove it
    from the ready queue. +*/
static void RR2_public_dispatch(LEVEL l, PID p, int nostop)
{
  RR2_level_des *lev = (RR2_level_des *)(level_table[l]);

  /* the task state is set EXE by the scheduler()
     we extract the task from the ready queue
     NB: we can't assume that p is the first task in the queue!!! */
  iq_extract(p, &lev->ready);
}
 
/* Preemption epilogue: decide where the task re-enters the ready
   queue depending on whether its timeslice is exhausted. */
static void RR2_public_epilogue(LEVEL l, PID p)
{
  RR2_level_des *lev = (RR2_level_des *)(level_table[l]);

  /* check if the slice is finished and insert the task in the correct
     queue position */
  if (proc_table[p].avail_time <= 0) {
    /* slice exhausted: recharge and go to the tail */
    proc_table[p].avail_time += proc_table[p].wcet;
    iq_insertlast(p,&lev->ready);
  }
  else
    /* avail_time is still > 0, so the preempted task keeps the head of
       the queue and will run for the residual slice */
    iq_insertfirst(p,&lev->ready);

  proc_table[p].status = RR2_READY;
}
 
/* Activate a task: if it is not sleeping, just record the activation
   (when the task was created with SAVE_ARRIVALS), otherwise make it
   ready at the tail of the queue. */
static void RR2_public_activate(LEVEL l, PID p, struct timespec *t)
{
  RR2_level_des *lev = (RR2_level_des *)(level_table[l]);

  /* Test if we are trying to activate a non sleeping task */
  /* save activation (only if needed); nact[p] == -1 means arrivals
     are skipped, not saved */
  if (proc_table[p].status != SLEEP) {
    if (lev->nact[p] != -1)
      lev->nact[p]++;
    return;
  }

  /* Insert task in the correct position */
  proc_table[p].status = RR2_READY;
  iq_insertlast(p,&lev->ready);
}
 
/* Wake a blocked task.  Like RR2_public_activate, but without the
   state check and without the pending-activation bookkeeping: the
   task simply becomes ready again at the tail of the queue. */
static void RR2_public_unblock(LEVEL l, PID p)
{
  RR2_level_des *lev = (RR2_level_des *)(level_table[l]);

  proc_table[p].status = RR2_READY;
  iq_insertlast(p, &lev->ready);
}
 
/* Block the running task: intentionally a no-op at this level. */
static void RR2_public_block(LEVEL l, PID p)
{
  /* Extract the running task from the level
     . we have already extract it from the ready queue at the dispatch time.
     . the capacity event have to be removed by the generic kernel
     . the wcet don't need modification...
     . the state of the task is set by the calling function

     So, we do nothing!!!
  */
}
 
/* End-of-cycle message from the task: consume one saved activation
   (if any) and re-queue the task, otherwise put it to sleep.
   Always returns 0. */
static int RR2_public_message(LEVEL l, PID p, void *m)
{
  RR2_level_des *lev = (RR2_level_des *)(level_table[l]);

  if (lev->nact[p] > 0) {
    /* a pending activation exists: the task continues immediately
       from the head of the ready queue */
    lev->nact[p]--;
    iq_insertfirst(p, &lev->ready);
    proc_table[p].status = RR2_READY;
  }
  else
    proc_table[p].status = SLEEP;

  jet_update_endcycle(); /* update the JET statistics */
  TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);

  return 0;
}
 
/* Task termination: drop any pending-activation bookkeeping and
   recycle the descriptor through the free queue. */
static void RR2_public_end(LEVEL l, PID p)
{
  RR2_level_des *lev = (RR2_level_des *)(level_table[l]);

  lev->nact[p] = -1;

  proc_table[p].status = FREE;
  iq_insertlast(p, &freedesc);
}
 
/* Registration functions */
 
/*+ This init function installs the "main" task: it builds an NRT
    model attached to __init__(), creates the task and activates it.
    BUG FIX: if task_create fails we must NOT activate the invalid
    PID (the old code called RR2_public_activate with p == NIL,
    indexing proc_table out of bounds). +*/
static void RR2_call_main(void *l)
{
  LEVEL lev;
  PID p;
  NRT_TASK_MODEL m;
  void *mb;

  lev = (LEVEL)l;

  nrt_task_default_model(m);
  nrt_task_def_level(m,lev); /* with this we are sure that the task arrives
                                to the correct level */

  mb = ((RR2_level_des *)level_table[lev])->multiboot;
  nrt_task_def_arg(m,mb);
  nrt_task_def_usemath(m);
  nrt_task_def_nokill(m);
  nrt_task_def_ctrl_jet(m);

  p = task_create("Main", __init__, (TASK_MODEL *)&m, NULL);

  if (p == NIL) {
    printk("\nPanic!!! can't create main task...\n");
    return;  /* do not touch proc_table[NIL]! */
  }

  RR2_public_activate(lev,p,NULL);
}
 
 
/*+ Registration function:
      TIME slice                the timeslice for the Round Robin queue
      int createmain            1 if the level creates the main task, 0 otherwise
      struct multiboot_info *mb used if createmain is specified
    returns the level number at which the module has been registered +*/
LEVEL RR2_register_level(TIME slice,
                         int createmain,
                         struct multiboot_info *mb)
{
  LEVEL l;             /* the newly registered level */
  RR2_level_des *lev;  /* shorthand for its descriptor */
  PID i;

  printk("RR2_register_level\n");

  /* allocate an entry in the level_table and fetch it */
  l = level_alloc_descriptor(sizeof(RR2_level_des));
  lev = (RR2_level_des *)level_table[l];

  /* install the public scheduling interface */
  lev->l.public_scheduler = RR2_public_scheduler;
  lev->l.public_create    = RR2_public_create;
  lev->l.public_end       = RR2_public_end;
  lev->l.public_dispatch  = RR2_public_dispatch;
  lev->l.public_epilogue  = RR2_public_epilogue;
  lev->l.public_activate  = RR2_public_activate;
  lev->l.public_unblock   = RR2_public_unblock;
  lev->l.public_block     = RR2_public_block;
  lev->l.public_message   = RR2_public_message;

  /* no pending activations recorded yet */
  for (i = 0; i < MAX_PROC; i++)
    lev->nact[i] = -1;

  iq_init(&lev->ready, &freedesc, 0);

  /* clamp the default timeslice into the allowed range */
  if (slice < RR2_MINIMUM_SLICE) slice = RR2_MINIMUM_SLICE;
  if (slice > RR2_MAXIMUM_SLICE) slice = RR2_MAXIMUM_SLICE;
  lev->slice = slice;

  lev->multiboot = mb;

  if (createmain)
    sys_atrunlevel(RR2_call_main, (void *)l, RUNLEVEL_INIT);

  return l;
}
 
 
/shark/trunk/modules/rr2/subdir.mk
0,0 → 1,0
OBJS += rr2/rr2.o
/shark/trunk/modules/ds/ds/ds.h
0,0 → 1,134
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: ds.h,v 1.1 2005-02-25 10:53:02 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:53:02 $
------------
 
 This file contains the aperiodic server DS (Deferrable Server)
 
Title:
DS (Deferrable Server)
 
Task Models Accepted:
SOFT_TASK_MODEL - Soft Tasks
wcet field is ignored
met field is ignored
period field is ignored
periodicity field can be only APERIODIC
arrivals field can be either SAVE or SKIP
 
Description:
This module schedule his tasks following the Deferrable Server scheme.
 
All the tasks are put in a FIFO (FCFS) queue and at a time only the first
task in the queue is put in the upper level.
 
The module remembers pending activations when calling task_sleep...
 
Exceptions raised:
XUNVALID_GUEST
This level doesn't support guests. When a guest operation
is called, the exception is raised.
 
Restrictions & special features:
- This level doesn't manage the main task.
- At init time we have to specify:
. The Capacity and the period used by the server
- The level don't use the priority field.
- A function to return the used bandwidth of the level is provided.
- if an aperiodic task calls a task_delay when owning a mutex implemented
with shadows, the delay may have no effect, so don't use delay when
using a mutex!!!
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#ifndef __DS_H__
#define __DS_H__
 
#include <kernel/config.h>
#include <sys/types.h>
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/*+ 1 - ln(2) +*/
#ifndef RM_MINFREEBANDWIDTH
#define RM_MINFREEBANDWIDTH 1317922825
#endif
 
/*+ flags... +*/
#define DS_DISABLE_ALL 0
#define DS_ENABLE_BACKGROUND 1 /*+ Background scheduling enabled +*/
#define DS_ENABLE_GUARANTEE_EDF 2 /*+ Task Guarantee enabled +*/
#define DS_ENABLE_ALL_EDF 3 /*+ All flags enabled +*/
 
#define DS_ENABLE_GUARANTEE_RM 4 /*+ Task Guarantee enabled +*/
#define DS_ENABLE_ALL_RM 5 /*+ All flags enabled +*/
 
/*+ internal flags +*/
#define DS_BACKGROUND 8 /*+ this flag is set when scheduling
in background +*/
#define DS_BACKGROUND_BLOCK 16 /*+ this flag is set when we want to
blocks the background scheduling +*/
 
/*+ Registration function:
bandwidth_t b Max bandwidth used by the TBS
int flags Options to be used in this level instance...
LEVEL master the level that must be used as master level for the
TBS tasks
int num,den used to compute the TBS bandwidth
 
returns the level number at which the module has been registered.
+*/
LEVEL DS_register_level(int flags, LEVEL master, int Cs, int per);
 
/*+ Returns the used bandwidth of a level +*/
bandwidth_t DS_usedbandwidth(LEVEL l);
 
__END_DECLS
#endif
/shark/trunk/modules/ds/ds.c
0,0 → 1,483
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: ds.c,v 1.1 2005-02-25 10:53:02 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:53:02 $
------------
 
This file contains the aperiodic server DS (Deferrable Server)
 
 This module is directly derived from the Polling Server one.
 All the notes written for the PS are valid for the DS.
 
 The difference between DS and PS is that when there are no tasks to
 schedule the capacity is not reset to 0...
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <ds/ds/ds.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
/*+ Status used in the level +*/
#define DS_WAIT APER_STATUS_BASE /*+ waiting the service +*/
 
/*+ the level redefinition for the Deferrable Server level +*/
typedef struct {
  level_des l; /*+ the standard level descriptor +*/

  /* The wcet are stored in the task descriptor's priority
     field, so no other fields are needed */

  int nact[MAX_PROC]; /*+ number of pending activations;
                          -1 = arrivals are skipped, not saved +*/

  struct timespec lastdline; /*+ the last deadline assigned to
                                 a DS task +*/

  int Cs; /*+ server capacity (usec) +*/
  int availCs; /*+ server avail time; may go negative on overrun +*/

  IQUEUE wait; /*+ the wait queue of the DS +*/
  PID activated; /*+ the task inserted in the master level, or NIL +*/

  int flags; /*+ the init flags... +*/

  bandwidth_t U; /*+ the used bandwidth by the server +*/
  int period; /*+ server period (usec) +*/

  LEVEL scheduling_level; /*+ the master (EDF-like) level +*/

} DS_level_des;
 
/* This static function activates the task pointed by lev->activated:
   it is inserted as a guest (JOB_TASK_MODEL) into the master level,
   carrying the current server deadline and period. */
static __inline__ void DS_activation(DS_level_des *lev)
{
  PID p; /* for readableness */
  JOB_TASK_MODEL j; /* the guest model */
  LEVEL m; /* the master level... only for readableness*/

  p = lev->activated;
  m = lev->scheduling_level;
  job_task_default_model(j,lev->lastdline);
  job_task_def_period(j,lev->period);
  level_table[m]->private_insert(m,p,(TASK_MODEL *)&j);
  // kern_printf("(%d %d)",lev->lastdline.tv_sec,lev->lastdline.tv_nsec);
}
 
/* Periodic replenishment timer of the DS: advances the server
   deadline by one period, recharges the capacity and, if the server
   is idle with budget available, activates the first waiting task.
   Re-arms itself at each expiration.  `a` is the LEVEL cast to
   void*. */
static void DS_deadline_timer(void *a)
{
  DS_level_des *lev = (DS_level_des *)(level_table[(LEVEL)a]);

  ADDUSEC2TIMESPEC(lev->period, &lev->lastdline);

  // kern_printf("(%d:%d %d)",lev->lastdline.tv_sec,lev->lastdline.tv_nsec, lev->period);
  /* full recharge; a negative availCs (overrun) is paid back first */
  if (lev->availCs >= 0)
    lev->availCs = lev->Cs;
  else
    lev->availCs += lev->Cs;

  /* availCs may be <0 because a task executed via a shadow for many time
     lev->activated == NIL only if the prec task was finished and there
     was not any other task to be put in the ready queue
     ... we are now activating the next task */
  if (lev->availCs > 0 && lev->activated == NIL) {
    if (iq_query_first(&lev->wait) != NIL) {
      lev->activated = iq_getfirst(&lev->wait);
      DS_activation(lev);
      event_need_reschedule();
    }
  }

  /* re-arm the timer at the next server deadline */
  kern_event_post(&lev->lastdline, DS_deadline_timer, a);
  // kern_printf("!");
}
 
/* Background scheduler: the DS steals background time to execute
   aperiodic activities.  Returns NIL while background scheduling is
   blocked, otherwise the first waiting task (or NIL if none). */
static PID DS_public_schedulerbackground(LEVEL l)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);

  lev->flags |= DS_BACKGROUND;

  return (lev->flags & DS_BACKGROUND_BLOCK) ? NIL
                                            : iq_query_first(&lev->wait);
}
 
/* On-line EDF guarantee (enabled only via the init flags): succeed
   iff the residual bandwidth covers the server utilization, in which
   case it is consumed.  Returns 1 on success, 0 on failure. */
static int DS_public_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);

  if (*freebandwidth < lev->U)
    return 0;

  *freebandwidth -= lev->U;
  return 1;
}
 
/* On-line RM guarantee: like the EDF one, but the residual bandwidth
   must also retain the 1-ln(2) slack (RM_MINFREEBANDWIDTH) on top of
   the server utilization. */
static int DS_public_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);

  if (*freebandwidth <= lev->U + RM_MINFREEBANDWIDTH)
    return 0;

  *freebandwidth -= lev->U;
  return 1;
}
 
/* Accept only aperiodic SOFT tasks directed at this level and set up
   the pending-activation counter (nact[p] == -1 means arrivals are
   skipped).  Returns 0 on success, -1 if the model is rejected.
   FIX: removed a duplicated `s = (SOFT_TASK_MODEL *)m;` cast. */
static int DS_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);

  SOFT_TASK_MODEL *s;

  if (m->pclass != SOFT_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;
  s = (SOFT_TASK_MODEL *)m;
  if (s->periodicity != APERIODIC) return -1;

  if (s->arrivals == SAVE_ARRIVALS)
    lev->nact[p] = 0;
  else
    lev->nact[p] = -1;

  return 0; /* OK, also if the task cannot be guaranteed... */
}
 
/* Dispatch the task p: either it is the task currently served by the
   master level (delegate the dispatch there) or it is dispatched in
   background from the wait queue.  Also posts the capacity timer that
   enforces the server budget. */
static void DS_public_dispatch(LEVEL l, PID p, int nostop)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);
  struct timespec ty;

  // if (nostop) kern_printf("NOSTOP!!!!!!!!!!!!");
  /* there is at least one task ready inserted in an EDF or similar
     level note that we can't check the status because the scheduler set it
     to exe before calling task_dispatch. we have to check
     lev->activated != p instead */
  if (lev->activated != p) {
    /* background dispatch: the task comes straight from the wait queue */
    iq_extract(p, &lev->wait);
    //kern_printf("#%d#",p);
  }
  else {
    //if (nostop) kern_printf("(gd status=%d)",proc_table[p].status);
    level_table[ lev->scheduling_level ]->
      private_dispatch(lev->scheduling_level,p,nostop);
  }

  /* set the capacity timer to fire when the server budget runs out */
  if (!nostop) {
    TIMESPEC_ASSIGN(&ty, &schedule_time);
    ADDUSEC2TIMESPEC(lev->availCs,&ty);
    cap_timer = kern_event_post(&ty, capacity_timer, NULL);
  }

  // kern_printf("(disp %d %d)",ty.tv_sec, ty.tv_nsec);
}
 
/* Preemption epilogue: charge the consumed time to the server budget
   (background execution is free), then either suspend the server
   (budget exhausted) or re-queue the task for later execution. */
static void DS_public_epilogue(LEVEL l, PID p)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);
  struct timespec ty;
  TIME tx;

  /* update the server capacity; time run in background is not charged */
  if (lev->flags & DS_BACKGROUND)
    lev->flags &= ~DS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
  }

  // kern_printf("(epil %d %d)",lev->availCs, proc_table[p].avail_time);

  /* check if the server capacity is finished... */
  if (lev->availCs < 0) {
    // kern_printf("(epil Cs%d %d:%d act%d p%d)",
    //     lev->availCs,proc_table[p].timespec_priority.tv_sec,
    //     proc_table[p].timespec_priority.tv_nsec,
    //     lev->activated,p);
    /* the server slice has finished... do the task_end!!!
       a first version of the module used the task_endcycle, but it was
       not conceptually correct because the task didn't stop because it
       finished all the work but because the server didn't have budget!
       So, if the task_endcycle is called, the task remain into the
       master level, and we can't wake him up if, for example, another
       task point the shadow to it!!!*/
    if (lev->activated == p)
      level_table[ lev->scheduling_level ]->
        private_extract(lev->scheduling_level,p);
    iq_insertfirst(p, &lev->wait);
    proc_table[p].status = DS_WAIT;
    lev->activated = NIL;
  }
  else
    /* the task has been preempted. it returns into the ready queue or to the
       wait queue by calling the guest_epilogue... */
    if (lev->activated == p) {//kern_printf("Û1");
      level_table[ lev->scheduling_level ]->
        private_epilogue(lev->scheduling_level,p);
    } else { //kern_printf("Û2");
      iq_insertfirst(p, &lev->wait);
      proc_table[p].status = DS_WAIT;
    }
}
 
/* Activate a task.  If it is already being served or waiting, the
   activation is saved (when SAVE_ARRIVALS was requested).  A sleeping
   task is either served immediately — if the server is idle and has
   budget — or queued in the wait queue. */
static void DS_public_activate(LEVEL l, PID p, struct timespec *t)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);

  if (lev->activated == p || proc_table[p].status == DS_WAIT) {
    /* already active: save the activation if the policy allows it */
    if (lev->nact[p] != -1)
      lev->nact[p]++;
  }
  else if (proc_table[p].status == SLEEP) {

    if (lev->activated == NIL && lev->availCs > 0) {
      lev->activated = p;
      DS_activation(lev);
    }
    else {
      iq_insertlast(p, &lev->wait);
      proc_table[p].status = DS_WAIT;
    }
  }
  else
  /* unexpected state: reject the activation (diagnostic only) */
  { kern_printf("DS_REJ%d %d %d %d ",p, proc_table[p].status, lev->activated, lev->wait.first);
    return; }

}
 
/* Wake a blocked task: re-enable background scheduling, mark the
   server idle, and put the task back at the head of the wait queue.
   CONSISTENCY FIX: use NIL (as everywhere else in this module)
   instead of a raw -1 for "no task inserted in the master level". */
static void DS_public_unblock(LEVEL l, PID p)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);

  lev->flags &= ~DS_BACKGROUND_BLOCK;

  lev->activated = NIL;

  /* when we reinsert the task into the system, the server capacity
     is always 0 because nobody executes with the DS before... */
  iq_insertfirst(p, &lev->wait);
  proc_table[p].status = DS_WAIT;
}
 
/* Block the served task (e.g. on a synchronization object): zero the
   server budget, block background scheduling until the task unblocks,
   and remove the task from the master level if it was inserted. */
static void DS_public_block(LEVEL l, PID p)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);

  /* update the server capacity */
  lev->availCs = 0;

  lev->flags |= DS_BACKGROUND_BLOCK;

  if (lev->activated == p)
    level_table[ lev->scheduling_level ]->
      private_extract(lev->scheduling_level,p);
}
 
/* End-of-cycle message from the served task: charge the consumed
   budget, remove the task from wherever it is, then either re-queue
   it (a saved activation is pending) or put it to sleep; finally
   promote the next waiting task, if any.  Always returns 0. */
static int DS_public_message(LEVEL l, PID p, void *m)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);
  struct timespec ty;
  TIME tx;

  /* update the server capacity; background execution is not charged */
  if (lev->flags & DS_BACKGROUND)
    lev->flags &= ~DS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
  }

  /* remove the task from the master level or from the wait queue */
  if (lev->activated == p)
    level_table[ lev->scheduling_level ]->
      private_extract(lev->scheduling_level,p);
  else
    iq_extract(p, &lev->wait);

  if (lev->nact[p] > 0)
  {
    /* a saved activation is pending: consume it and re-queue */
    lev->nact[p]--;
    iq_insertlast(p, &lev->wait);
    proc_table[p].status = DS_WAIT;
  }
  else
    proc_table[p].status = SLEEP;

  /* serve the next waiting task, if any */
  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated != NIL)
    DS_activation(lev);

  jet_update_endcycle(); /* Update the Jet data... */
  TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);

  return 0;
}
 
/* Task termination: charge the consumed budget, remove the task,
   recycle its descriptor and promote the next waiting task. */
static void DS_public_end(LEVEL l, PID p)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);
  struct timespec ty;
  TIME tx;

  /* update the server capacity; background execution is not charged */
  if (lev->flags & DS_BACKGROUND)
    lev->flags &= ~DS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
  }

  if (lev->activated == p)
    level_table[ lev->scheduling_level ]->
      private_extract(lev->scheduling_level,p);

  /* recycle the task descriptor */
  proc_table[p].status = FREE;
  iq_insertfirst(p,&freedesc);

  /* serve the next waiting task, if any */
  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated != NIL)
    DS_activation(lev);
}
 
/* Registration functions */
 
 
/*+ Runlevel-init hook: set the first server deadline one period from
    now and arm the periodic deadline/replenishment timer. +*/
static void DS_dline_install(void *l)
{
  DS_level_des *lev = (DS_level_des *)(level_table[(LEVEL)l]);

  kern_gettime(&lev->lastdline);
  ADDUSEC2TIMESPEC(lev->period, &lev->lastdline);
  kern_event_post(&lev->lastdline, DS_deadline_timer, l);
}
 
 
 
/*+ Registration function:
      int flags    the init flags ... see ds.h
      LEVEL master the level used to schedule the served task
      int Cs       server capacity (usec)
      int per      server period (usec)
    returns the level number at which the module has been registered +*/
LEVEL DS_register_level(int flags, LEVEL master, int Cs, int per)
{
  LEVEL l;            /* the level that we register */
  DS_level_des *lev;  /* for readableness only */
  PID i;              /* a counter */

  printk("DS_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor(sizeof(DS_level_des));

  lev = (DS_level_des *)level_table[l];

  /* fill the standard descriptor */

  if (flags & DS_ENABLE_BACKGROUND)
    lev->l.public_scheduler = DS_public_schedulerbackground;

  if (flags & DS_ENABLE_GUARANTEE_EDF)
    lev->l.public_guarantee = DS_public_guaranteeEDF;
  else if (flags & DS_ENABLE_GUARANTEE_RM)
    lev->l.public_guarantee = DS_public_guaranteeRM;
  else
    lev->l.public_guarantee = NULL;

  lev->l.public_create    = DS_public_create;
  lev->l.public_end       = DS_public_end;
  lev->l.public_dispatch  = DS_public_dispatch;
  lev->l.public_epilogue  = DS_public_epilogue;
  lev->l.public_activate  = DS_public_activate;
  lev->l.public_unblock   = DS_public_unblock;
  lev->l.public_block     = DS_public_block;
  lev->l.public_message   = DS_public_message;

  /* fill the DS descriptor part */

  /* no pending activations recorded yet */
  for (i=0; i<MAX_PROC; i++)
    lev->nact[i] = -1;

  lev->Cs = Cs;
  lev->availCs = 0;   /* the budget becomes available at the first
                         deadline timer expiration */

  lev->period = per;

  iq_init(&lev->wait, &freedesc, 0);
  lev->activated = NIL;

  lev->U = (MAX_BANDWIDTH / per) * Cs;

  lev->scheduling_level = master;

  /* keep only the public EDF flag bits (background/guarantee);
     the internal DS_BACKGROUND* bits start cleared */
  lev->flags = flags & 0x07;

  sys_atrunlevel(DS_dline_install,(void *) l, RUNLEVEL_INIT);

  return l;
}
 
/*+ Returns the bandwidth reserved by the DS level. +*/
bandwidth_t DS_usedbandwidth(LEVEL l)
{
  return ((DS_level_des *)(level_table[l]))->U;
}
 
/shark/trunk/modules/ds/subdir.mk
0,0 → 1,0
OBJS += ds/ds.o
/shark/trunk/modules/hardcbs/hardcbs/hardcbs.h
0,0 → 1,137
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* Giacomo Guidi <giacomo@gandalf.sssup.it>
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
/*
 This file contains the aperiodic server CBS (Constant Bandwidth Server)
with hard reservation and met/period update
 
Title:
CBS (Constant Bandwidth Server)
 
Task Models Accepted:
SOFT_TASK_MODEL - Soft Tasks
wcet field is ignored
met field must be != 0
period field must be != 0
periodicity field can be either PERIODIC or APERIODIC
arrivals field can be either SAVE or SKIP
 
Description:
This module schedule his tasks following the CBS scheme.
(see Luca Abeni and Giorgio Buttazzo,
"Integrating Multimedia Applications in Hard Real-Time Systems"
Proceedings of the IEEE Real-Time Systems Symposium, Madrid, Spain,
December 1998)
 
The tasks are inserted in an EDF level (or similar) with a JOB_TASK_MODEL,
and the CBS level expects that the task is scheduled with the absolute
deadline passed in the model.
 
The task guarantee is based on the factor utilization approach.
 
Exceptions raised:
XUNVALID_GUEST
This level doesn't support guests. When a guest operation
is called, the exception is raised.
 
These exceptions are pclass-dependent...
XDEADLINE_MISS
If a task miss his deadline, the exception is raised.
Normally, a CBS task can't cause the raise of such exception because
if it really use more time than declared the deadline is postponed.
 
Restrictions & special features:
- This level doesn't manage the main task.
- At init time we have to specify:
. guarantee check
(when all task are created the system will check that the task_set
will not use more than the available bandwidth)
- A function to return the used bandwidth of the level is provided.
- A function to return the pending activations of the task.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#ifndef __HCBS_H__
#define __HCBS_H__
 
#include <ll/ll.h>
#include <kernel/config.h>
#include <sys/types.h>
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/*+ flags... +*/
#define HCBS_DISABLE_ALL 0 /*+ All flags disabled +*/
#define HCBS_ENABLE_GUARANTEE 1 /*+ Task Guarantee enabled +*/
#define HCBS_ENABLE_ALL 1
 
#define HCBS_SET_PERIOD 0
#define HCBS_GET_PERIOD 1
#define HCBS_SET_MET 2
#define HCBS_GET_MET 3
 
typedef struct {
int command;
TIME param;
 
} HCBS_command_message;
 
/*+ Registration function:
int flags Options to be used in this level instance...
LEVEL master the level that must be used as master level for the
CBS tasks
 
returns the level number at which the module has been registered.
+*/
LEVEL HCBS_register_level(int flags, LEVEL master);
 
/*+ Returns the used bandwidth of a level +*/
bandwidth_t HCBS_usedbandwidth(LEVEL l);
 
/*+ Returns the number of pending activations of a task.
No control is done if the task is not a CBS task! +*/
int HCBS_get_nact(LEVEL l, PID p);
 
__END_DECLS
#endif
/shark/trunk/modules/hardcbs/hardcbs.c
0,0 → 1,589
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* Giacomo Guidi <giacomo@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
#include <hardcbs/hardcbs/hardcbs.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
/*+ Status used in the level +*/
#define HCBS_IDLE APER_STATUS_BASE /*+ waiting the activation +*/
#define HCBS_ZOMBIE APER_STATUS_BASE+1 /*+ waiting the period end +*/
 
/*+ task flags +*/
#define HCBS_SAVE_ARRIVALS 1
#define HCBS_APERIODIC 2
#define HCBS_SLEEP 4
#define HCBS_OVERLOAD 8
 
/*+ the level redefinition for the Constant Bandwidth Server level
    (hard-reservation variant) +*/
typedef struct {
  level_des l; /*+ the standard level descriptor +*/

  /* The wcet are stored in the task descriptor, but we need
     an array for the deadlines. We can't use the timespec_priority
     field because it is used by the master level!!!...
     Notice that however the use of the timespec_priority field
     does not cause any problem... */

  struct timespec cbs_dline[MAX_PROC]; /*+ CBS deadlines +*/

  TIME period[MAX_PROC]; /*+ CBS activation period +*/

  struct timespec reactivation_time[MAX_PROC];
  /*+ the time at witch the reactivation timer is post +*/
  int reactivation_timer[MAX_PROC];
  /*+ the recativation timer +*/

  int nact[MAX_PROC]; /*+ number of pending activations +*/

  BYTE flag[MAX_PROC]; /*+ task flags (HCBS_APERIODIC, ...) +*/

  int flags; /*+ the init flags... +*/

  bandwidth_t U; /*+ the used bandwidth by the server +*/

  LEVEL scheduling_level; /*+ the master (EDF-like) level +*/

} HCBS_level_des;
 
/* (Re)activate task p at time acttime: apply the CBS admission rule,
   possibly assigning a fresh deadline/budget, then insert the task as
   a guest into the master level with its CBS deadline. */
static void HCBS_activation(HCBS_level_des *lev,
                            PID p,
                            struct timespec *acttime)
{
  JOB_TASK_MODEL job;

  /* we have to check if the deadline and the wcet are correct before
     activating a new task or an old task... */

  /* check 1: if the deadline is before than the actual scheduling time */

  /* check 2: if ( avail_time >= (cbs_dline - acttime)* (wcet/period) )
     (rule 7 in the CBS article!) */
  TIME t;
  struct timespec t2,t3;

  /* t3 = residual budget scaled to time units (usec -> timespec) */
  t = (lev->period[p] * proc_table[p].avail_time) / proc_table[p].wcet;
  t3.tv_sec = t / 1000000;
  t3.tv_nsec = (t % 1000000) * 1000;

  SUBTIMESPEC(&lev->cbs_dline[p], acttime, &t2);

  if (/* 1 */ TIMESPEC_A_LT_B(&lev->cbs_dline[p], acttime) ||
      /* 2 */ TIMESPEC_A_GT_B(&t3, &t2) ) {
    /* if (TIMESPEC_A_LT_B(&lev->cbs_dline[p], acttime) )
         kern_printf("$");
       else
         kern_printf("(Ûdline%d.%d act%d.%d wcet%d per%d avail%dÛ)",
           lev->cbs_dline[p].tv_sec,lev->cbs_dline[p].tv_nsec/1000,
           acttime->tv_sec, acttime->tv_nsec/1000,
           proc_table[p].wcet, lev->period[p], proc_table[p].avail_time);
    */ /* we modify the deadline ... */
    TIMESPEC_ASSIGN(&lev->cbs_dline[p], acttime);
    ADDUSEC2TIMESPEC(lev->period[p], &lev->cbs_dline[p]);

    /* and the capacity */
    proc_table[p].avail_time = proc_table[p].wcet;
  }

  /* and, finally, we reinsert the task in the master level */
  job_task_default_model(job, lev->cbs_dline[p]);
  job_task_def_noexc(job);
  level_table[ lev->scheduling_level ]->
    private_insert(lev->scheduling_level, p, (TASK_MODEL *)&job);
}
 
/* Hard-reservation budget recharge: add one met worth of budget
   (avail_time may be negative, so the debt is paid back first). */
static void HCBS_reload(HCBS_level_des *lev, PID p)
{
  proc_table[p].avail_time += proc_table[p].wcet;
}
 
/* Hard reservation: when the budget goes negative the deadline is
   postponed by one period (the recharge itself happens later, in the
   reactivation timer). */
static void HCBS_avail_time_check(HCBS_level_des *lev, PID p)
{
  /* Only one time due to hard reservation */
  if (proc_table[p].avail_time < 0)
    ADDUSEC2TIMESPEC(lev->period[p], &lev->cbs_dline[p]);

}
 
 
/* this is the periodic reactivation of the task... it is posted only
   if the task is a periodic task.  It recharges the hard-reservation
   budget and, depending on the task state, either reactivates it or
   records the missed activation. */
static void HCBS_timer_reactivate(void *par)
{
  PID p = (PID) par;
  HCBS_level_des *lev;

  lev = (HCBS_level_des *)level_table[proc_table[p].task_level];

  /* a sleep was requested and the task has completed: park it */
  if (lev->flag[p] & HCBS_SLEEP && proc_table[p].status == HCBS_IDLE) {
    proc_table[p].status = SLEEP;
    proc_table[p].avail_time = proc_table[p].wcet;
    NULL_TIMESPEC(&lev->cbs_dline[p]);
    return;
  }

  /* Hard reservation avail_time reload */
  HCBS_reload(lev, p);

  if (proc_table[p].status == HCBS_IDLE) {
    /* the task has finished the current activation and must be
       reactivated */
    HCBS_activation(lev,p,&lev->reactivation_time[p]);

    event_need_reschedule();
  }
  else if (lev->flag[p] & HCBS_SAVE_ARRIVALS)
    /* the task has not completed the current activation, so we save
       the activation incrementing nact... */
    lev->nact[p]++;

  if (!(lev->flag[p] & HCBS_APERIODIC)) {
    /* repost the event at the next period end... */
    ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
    lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
                                                 HCBS_timer_reactivate,
                                                 (void *)p);
  }
  /* tracer stuff */
  TRACER_LOGEVENT(FTrace_EVT_task_timer,(unsigned short int)proc_table[p].context,(unsigned int)proc_table[p].task_level);

}
 
/*+ this function is called when a killed or ended task reaches the
    period end: the descriptor can finally be recycled.
    FIX: removed the unused local `lev` (dead store, compiler
    warning). +*/
static void HCBS_timer_zombie(void *par)
{
  PID p = (PID) par;

  /* we finally put the task in the free descriptor queue */
  proc_table[p].status = FREE;
  iq_insertfirst(p,&freedesc);

}
 
 
/* On-line guarantee (enabled only via the init flags): succeed iff
   the residual bandwidth covers the server utilization, in which
   case it is consumed.  Returns 1 on success, 0 on failure. */
static int HCBS_public_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  HCBS_level_des *lev = (HCBS_level_des *)(level_table[l]);

  if (*freebandwidth < lev->U)
    return 0;

  *freebandwidth -= lev->U;
  return 1;
}
 
/* Accept a SOFT task with met and period set, directed at this level;
   reserve its bandwidth (when the guarantee is enabled), set up the
   budget and the per-task flags.  Returns 0 on success, -1 if the
   model is rejected or the bandwidth would overflow.
   FIX: removed a duplicated `soft = (SOFT_TASK_MODEL *)m;` cast. */
static int HCBS_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  HCBS_level_des *lev = (HCBS_level_des *)(level_table[l]);
  SOFT_TASK_MODEL *soft;

  if (m->pclass != SOFT_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;
  soft = (SOFT_TASK_MODEL *)m;
  if (!(soft->met && soft->period)) return -1;

  if (lev->flags & HCBS_ENABLE_GUARANTEE) {
    bandwidth_t b;
    b = (MAX_BANDWIDTH / soft->period) * soft->met;

    /* really update lev->U, checking an overflow... */
    if (MAX_BANDWIDTH - lev->U > b)
      lev->U += b;
    else
      return -1;
  }

  /* Enable wcet check */
  proc_table[p].avail_time = soft->met;
  proc_table[p].wcet = soft->met;
  proc_table[p].control |= CONTROL_CAP;

  lev->nact[p] = 0;
  lev->period[p] = soft->period;
  NULL_TIMESPEC(&lev->cbs_dline[p]);

  /* per-task flags: periodicity and arrivals policy */
  if (soft->periodicity == APERIODIC)
    lev->flag[p] = HCBS_APERIODIC;
  else
    lev->flag[p] = 0;

  if (soft->arrivals == SAVE_ARRIVALS)
    lev->flag[p] |= HCBS_SAVE_ARRIVALS;

  return 0; /* OK, also if the task cannot be guaranteed... */
}
 
static void HCBS_public_detach(LEVEL l, PID p)
{
  /* The HCBS level does not introduce any dynamically allocated
     per-task field, so detaching only has to give back the bandwidth
     that was reserved at creation time (the met is kept in wcet). */

  HCBS_level_des *lev = (HCBS_level_des *)(level_table[l]);

  if (lev->flags & HCBS_ENABLE_GUARANTEE) {
    lev->U -= (MAX_BANDWIDTH / lev->period[p]) * proc_table[p].wcet;
  }
}
 
/* Check that the scheduling choice made by the master level is still
   valid for task p; returns 0 if it is, -1 to force a reschedule. */
static int HCBS_public_eligible(LEVEL l, PID p)
{
  HCBS_level_des *lev = (HCBS_level_des *)(level_table[l]);
  JOB_TASK_MODEL job;

  /* we have to check if the deadline and the wcet are correct...
     if the CBS level schedules in background with respect to other
     levels, there can be the case in which a task is scheduled with
     schedule_time > CBS_deadline; in this case (not covered in the
     CBS article, because with only the standard scheduling policy it
     never happens) we reassign the deadline */

  if ( TIMESPEC_A_LT_B(&lev->cbs_dline[p], &schedule_time) ) {
    /* we kill the current activation */
    level_table[ lev->scheduling_level ]->
      private_extract(lev->scheduling_level, p);

    /* new deadline = schedule_time + period ... */
    TIMESPEC_ASSIGN(&lev->cbs_dline[p], &schedule_time);
    ADDUSEC2TIMESPEC(lev->period[p], &lev->cbs_dline[p]);

    /* and the budget is refilled */
    proc_table[p].avail_time = proc_table[p].wcet;
    /* and, finally, we reinsert the task in the master level with
       the new absolute deadline */
    job_task_default_model(job, lev->cbs_dline[p]);
    job_task_def_noexc(job);
    level_table[ lev->scheduling_level ]->
      private_insert(lev->scheduling_level, p, (TASK_MODEL *)&job);

    return -1;
  }

  return 0;
}
 
/* Dispatching is delegated entirely to the master scheduling level. */
static void HCBS_public_dispatch(LEVEL l, PID p, int nostop)
{
  HCBS_level_des *lev = (HCBS_level_des *)(level_table[l]);
  LEVEL master = lev->scheduling_level;

  level_table[master]->private_dispatch(master, p, nostop);
}
 
/* Called when task p stops executing; decides whether it was
   preempted (stay ready) or exhausted its budget (hard reservation:
   suspend until the next replenishment). */
static void HCBS_public_epilogue(LEVEL l, PID p)
{
  HCBS_level_des *lev = (HCBS_level_des *)(level_table[l]);

  /* check if the budget (wcet) is exhausted... */
  if ( proc_table[p].avail_time <= 0) {
    /* Set the overload flag */
    lev->flag[p] |= HCBS_OVERLOAD;

    /* we kill the current activation */
    level_table[ lev->scheduling_level ]->
      private_extract(lev->scheduling_level, p);

    /* we modify the deadline according to rule 4 ... */
    HCBS_avail_time_check(lev, p);

    /* Hard Reservation: the avail_time will be reloaded at the
       next reactivation event.  The task is suspended meanwhile. */
    proc_table[p].status = HCBS_IDLE;
    if (lev->flag[p] & HCBS_APERIODIC) {
      /* aperiodic tasks have no periodic reactivation event pending,
         so one is posted here, one period after the last one */
      ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
      lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
                                                   HCBS_timer_reactivate,
                                                   (void *)p);
    }

  }
  else
    /* the task has been preempted.  It returns into the ready queue
       through the master level's epilogue... */
    level_table[ lev->scheduling_level ]->
      private_epilogue(lev->scheduling_level,p);
}
 
/* Activate task p at time t: clear a pending sleep request, save the
   arrival if the task is already active, otherwise start a new server
   instance and (for periodic tasks) arm the reactivation timer. */
static void HCBS_public_activate(LEVEL l, PID p, struct timespec *t)
{
  HCBS_level_des *lev = (HCBS_level_des *)(level_table[l]);

  /* a pending sleep request is cancelled by a new activation */
  if (lev->flag[p] & HCBS_SLEEP) {
    lev->flag[p] &= ~HCBS_SLEEP;
    if (proc_table[p].status != SLEEP) return;
  }

  /* save the activation (only if needed...) */
  if (proc_table[p].status != SLEEP) {
    if (lev->flag[p] & HCBS_SAVE_ARRIVALS)
      lev->nact[p]++;
    return;
  }

  HCBS_activation(lev, p, t);

  /* Set the reactivation timer */
  if (!(lev->flag[p] & HCBS_APERIODIC))
  {
    /* we cannot use the deadline computed by HCBS_activation because
       the deadline may be != from actual_time + period
       (if we call the task_activate after a task_sleep, and the
       deadline was postponed a lot...) */
    TIMESPEC_ASSIGN(&lev->reactivation_time[p], t);
    ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
    //    TIMESPEC_ASSIGN(&lev->reactivation_time[p], &lev->cbs_dline[p]);
    lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
                                                 HCBS_timer_reactivate,
                                                 (void *)p);
  }
  //  kern_printf("act : %d %d |",lev->cbs_dline[p].tv_nsec/1000,p);
}
 
/* A task resuming after a block is handled as a fresh server
   activation at the current time. */
static void HCBS_public_unblock(LEVEL l, PID p)
{
  struct timespec now;
  HCBS_level_des *lev = (HCBS_level_des *)(level_table[l]);

  kern_gettime(&now);
  HCBS_activation(lev, p, &now);
}
 
/* Task p blocks: postpone the deadline if the budget ran out, then
   remove it from the master level's ready queue. */
static void HCBS_public_block(LEVEL l, PID p)
{
  HCBS_level_des *lev = (HCBS_level_des *)(level_table[l]);
  LEVEL master = lev->scheduling_level;

  HCBS_avail_time_check(lev, p);

  level_table[master]->private_extract(master, p);
}
 
/* Message entry point.  m == NULL means "end of the current job";
   m == 1 means "sleep at the next job end"; any other value is a
   pointer to an HCBS_command_message to set/get the met or period. */
static int HCBS_public_message(LEVEL l, PID p, void *m)
{
  HCBS_level_des *lev = (HCBS_level_des *)(level_table[l]);
  HCBS_command_message *msg;

  switch((long)(m)) {

    case (long)(NULL):

      /* postpone the deadline if the budget is exhausted... */
      HCBS_avail_time_check(lev, p);

      /* a job that ended while overloaded counts as one more
         pending activation */
      if (lev->flag[p] & HCBS_OVERLOAD) {
        lev->nact[p]++;
        lev->flag[p] &= ~HCBS_OVERLOAD;
      }

      if (lev->nact[p]) {
        /* pending activations: start the next job immediately */
        lev->nact[p]--;
        level_table[ lev->scheduling_level ]->
          private_epilogue(lev->scheduling_level,p);
      } else {
        level_table[ lev->scheduling_level ]->
          private_extract(lev->scheduling_level,p);

        if (lev->flag[p] & HCBS_APERIODIC)
          proc_table[p].status = SLEEP;
        else /* the task is soft-periodic: wait for the next period */
          proc_table[p].status = HCBS_IDLE;
      }

      jet_update_endcycle(); /* Update the Jet data... */
      TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);

      break;

    case 1:

      /* ask for a sleep: honored at the next job end / reactivation */
      lev->flag[p] |= HCBS_SLEEP;
      TRACER_LOGEVENT(FTrace_EVT_task_disable,(unsigned short int)proc_table[p].context,(unsigned int)l);

      break;

    /* Set/Get Met/Period */
    default:

      /* NOTE(review): unlike create/detach, these updates touch
         lev->U without checking HCBS_ENABLE_GUARANTEE — confirm this
         is intended when the guarantee is disabled. */
      msg = (HCBS_command_message *)(m);
      switch (msg->command) {
        case HCBS_SET_PERIOD:
          lev->U -= (bandwidth_t)(MAX_BANDWIDTH / lev->period[p]
                    * proc_table[p].wcet);
          lev->period[p] = msg->param;
          lev->U += (bandwidth_t)(MAX_BANDWIDTH / lev->period[p]
                    * proc_table[p].wcet);
          break;
        case HCBS_GET_PERIOD:
          msg->param = lev->period[p];
          break;
        case HCBS_SET_MET:
          lev->U -= (bandwidth_t)(MAX_BANDWIDTH / lev->period[p]
                    * proc_table[p].wcet);
          proc_table[p].wcet = msg->param;
          lev->U += (bandwidth_t)(MAX_BANDWIDTH / lev->period[p]
                    * proc_table[p].wcet);
          break;
        case HCBS_GET_MET:
          msg->param = proc_table[p].wcet;
          break;
      }

  }

  return 0;

}
 
/* Task p ends (or is killed): release its bandwidth, cancel any
   pending reactivation event and post the zombie event that will free
   the descriptor at the current server deadline. */
static void HCBS_public_end(LEVEL l, PID p)
{
  HCBS_level_des *lev = (HCBS_level_des *)(level_table[l]);

  /* postpone the deadline if the budget is exhausted... */
  HCBS_avail_time_check(lev, p);

  level_table[ lev->scheduling_level ]->
    private_extract(lev->scheduling_level,p);

  /* and free the allocated bandwidth */
  lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet;

  /* we delete the reactivation timer: it is pending for every
     periodic task, and for an aperiodic one only while overloaded
     (see HCBS_public_epilogue, which posts it in that case) */
  if (!(lev->flag[p] & HCBS_APERIODIC) ||
      (lev->flag[p] & HCBS_APERIODIC && lev->flag[p] & HCBS_OVERLOAD)) {
    kern_event_delete(lev->reactivation_timer[p]);
    lev->reactivation_timer[p] = -1;
  }

  /* Finally, we post the zombie event.  When the period end is
     reached, the task descriptor and bandwidth are freed */
  proc_table[p].status = HCBS_ZOMBIE;
  lev->reactivation_timer[p] = kern_event_post(&lev->cbs_dline[p],
                                               HCBS_timer_zombie,
                                               (void *)p);
}
 
/* Registration functions */
 
/*+ Registration function:
    int flags    the init flags (see the HCBS header)
    LEVEL master the master level the HCBS tasks are inserted into
    Returns the level number at which the module has been registered. +*/
LEVEL HCBS_register_level(int flags, LEVEL master)
{
  LEVEL l;            /* the level that we register */
  HCBS_level_des *lev;  /* for readability only */
  PID i;              /* a counter */

  printk("HCBS_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor(sizeof(HCBS_level_des));

  lev = (HCBS_level_des *)level_table[l];

  /* fill the standard descriptor; the guarantee hook is installed
     only when the on-line guarantee is requested */
  if (flags & HCBS_ENABLE_GUARANTEE)
    lev->l.public_guarantee = HCBS_public_guarantee;
  else
    lev->l.public_guarantee = NULL;
  lev->l.public_create    = HCBS_public_create;
  lev->l.public_detach    = HCBS_public_detach;
  lev->l.public_end       = HCBS_public_end;
  lev->l.public_eligible  = HCBS_public_eligible;
  lev->l.public_dispatch  = HCBS_public_dispatch;
  lev->l.public_epilogue  = HCBS_public_epilogue;
  lev->l.public_activate  = HCBS_public_activate;
  lev->l.public_unblock   = HCBS_public_unblock;
  lev->l.public_block     = HCBS_public_block;
  lev->l.public_message   = HCBS_public_message;

  /* fill the HCBS descriptor part: all per-task slots start empty */
  for (i=0; i<MAX_PROC; i++) {
     NULL_TIMESPEC(&lev->cbs_dline[i]);
     lev->period[i] = 0;
     NULL_TIMESPEC(&lev->reactivation_time[i]);
     lev->reactivation_timer[i] = -1;
     lev->nact[i] = 0;
     lev->flag[i] = 0;
  }


  lev->U = 0;

  lev->scheduling_level = master;

  lev->flags = flags;

  return l;
}
 
/*+ Returns the bandwidth currently reserved by this HCBS level +*/
bandwidth_t HCBS_usedbandwidth(LEVEL l)
{
  return ((HCBS_level_des *)(level_table[l]))->U;
}
 
/*+ Returns the number of pending activations of task p.
    No check is made that p really is an HCBS task! +*/
int HCBS_get_nact(LEVEL l, PID p)
{
  return ((HCBS_level_des *)(level_table[l]))->nact[p];
}
 
/shark/trunk/modules/hardcbs/subdir.mk
0,0 → 1,0
OBJS += hardcbs/hardcbs.o
/shark/trunk/modules/cbs/cbs/cbs.h
0,0 → 1,132
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: cbs.h,v 1.1 2005-02-25 10:53:02 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:53:02 $
------------
 
 This file contains the aperiodic server CBS (Constant Bandwidth Server)
 
Title:
CBS (Constant Bandwidth Server)
 
Task Models Accepted:
SOFT_TASK_MODEL - Soft Tasks
wcet field is ignored
met field must be != 0
period field must be != 0
periodicity field can be either PERIODIC or APERIODIC
arrivals field can be either SAVE or SKIP
 
Description:
This module schedule his tasks following the CBS scheme.
(see Luca Abeni and Giorgio Buttazzo,
"Integrating Multimedia Applications in Hard Real-Time Systems"
Proceedings of the IEEE Real-Time Systems Symposium, Madrid, Spain,
December 1998)
 
The tasks are inserted in an EDF level (or similar) with a JOB_TASK_MODEL,
and the CBS level expects that the task is scheduled with the absolute
deadline passed in the model.
 
The task guarantee is based on the factor utilization approach.
 
Exceptions raised:
XUNVALID_GUEST
This level doesn't support guests. When a guest operation
is called, the exception is raised.
 
These exceptions are pclass-dependent...
XDEADLINE_MISS
If a task miss his deadline, the exception is raised.
Normally, a CBS task can't cause the raise of such exception because
if it really use more time than declared the deadline is postponed.
 
Restrictions & special features:
- This level doesn't manage the main task.
- At init time we have to specify:
. guarantee check
(when all task are created the system will check that the task_set
will not use more than the available bandwidth)
- A function to return the used bandwidth of the level is provided.
- A function to return the pending activations of the task.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#ifndef __CBS_H__
#define __CBS_H__
 
#include <ll/ll.h>
#include <kernel/config.h>
#include <sys/types.h>
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/*+ flags... +*/
#define CBS_DISABLE_ALL 0 /*+ Task Guarantee enabled +*/
#define CBS_ENABLE_GUARANTEE 1 /*+ Task Guarantee enabled +*/
#define CBS_ENABLE_ALL 1
 
/*+ Registration function:
int flags Options to be used in this level instance...
LEVEL master the level that must be used as master level for the
CBS tasks
 
returns the level number at which the module has been registered.
+*/
LEVEL CBS_register_level(int flags, LEVEL master);
 
/*+ Returns the used bandwidth of a level +*/
bandwidth_t CBS_usedbandwidth(LEVEL l);
 
/*+ Returns the number of pending activations of a task.
No control is done if the task is not a CBS task! +*/
int CBS_get_nact(LEVEL l, PID p);
 
__END_DECLS
#endif
/shark/trunk/modules/cbs/cbs.c
0,0 → 1,596
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: cbs.c,v 1.1 2005-02-25 10:53:02 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:53:02 $
------------
 
 This file contains the aperiodic server CBS (Constant Bandwidth Server)
 
Read CBS.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <cbs/cbs/cbs.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
/*+ 4 debug purposes +*/
#undef CBS_TEST
#undef CBS_COUNTER
 
#ifdef TESTG
#include "drivers/glib.h"
TIME x,oldx;
extern TIME starttime;
#endif
 
 
/*+ Status used in the level +*/
#define CBS_IDLE APER_STATUS_BASE /*+ waiting the activation +*/
#define CBS_ZOMBIE APER_STATUS_BASE+1 /*+ waiting the period end +*/
 
/*+ task flags +*/
#define CBS_SAVE_ARRIVALS 1
#define CBS_APERIODIC 2
#define CBS_SLEEP 4
 
/*+ the level redefinition for the Constant Bandwidth Server level +*/
typedef struct {
  level_des l;     /*+ the standard level descriptor          +*/

  /* The wcet are stored in the task descriptor, but we need
     an array for the deadlines.  We can't use the timespec_priority
     field because it is used by the master level!!!...
     Notice that however the use of the timespec_priority field
     does not cause any problem... */

  struct timespec cbs_dline[MAX_PROC]; /*+ CBS deadlines +*/

  TIME period[MAX_PROC]; /*+ CBS activation period +*/

  struct timespec reactivation_time[MAX_PROC];
        /*+ the time at which the reactivation timer is posted +*/
  int reactivation_timer[MAX_PROC];
        /*+ the reactivation timer (kern_event handle, -1 if none) +*/

  int nact[MAX_PROC]; /*+ number of pending activations +*/

  BYTE flag[MAX_PROC];
        /*+ task flags: CBS_SAVE_ARRIVALS / CBS_APERIODIC / CBS_SLEEP +*/

  int flags; /*+ the init flags... +*/

  bandwidth_t U; /*+ the bandwidth reserved by the server +*/

  LEVEL scheduling_level; /*+ the master level tasks are inserted into +*/

} CBS_level_des;
 
#ifdef CBS_COUNTER
int cbs_counter=0;
int cbs_counter2=0;
#endif
 
 
/* Start a new server instance for task p at time acttime: possibly
   recompute deadline and budget (rule 7 of the CBS article), then
   insert the task into the master level with the resulting absolute
   deadline. */
static void CBS_activation(CBS_level_des *lev,
                           PID p,
                           struct timespec *acttime)
{
  JOB_TASK_MODEL job;

  /* we have to check if the deadline and the wcet are correct before
     activating a new task or an old task... */

  /* check 1: if the deadline is before than the actual scheduling time */
  /* check 2: if ( avail_time >= (cbs_dline - acttime)* (wcet/period) )
     (rule 7 in the CBS article!) */

  /* CHANGES by Anton Cervin 2004-06-23:
     a) only check: if ( acttime + (avail_time * period) / wcet > cbs_dline )
     b) without (long long) in the computation we can have an overflow!! */
  TIME t;
  struct timespec t2;

  t = ((long long)lev->period[p] * (long long)proc_table[p].avail_time)
      / (long long)proc_table[p].wcet;
  t2 = *acttime;
  ADDUSEC2TIMESPEC(t, &t2);

  if (TIMESPEC_A_GT_B(&t2, &lev->cbs_dline[p])) {

    /* the old deadline cannot be kept: new deadline ... */
    TIMESPEC_ASSIGN(&lev->cbs_dline[p], acttime);
    ADDUSEC2TIMESPEC(lev->period[p], &lev->cbs_dline[p]);

    /* ... and a full budget */
    proc_table[p].avail_time = proc_table[p].wcet;
  }

#ifdef TESTG
  if (starttime && p == 3) {
    oldx = x;
    x = ((lev->cbs_dline[p].tv_sec*1000000+lev->cbs_dline[p].tv_nsec/1000)/5000 - starttime) + 20;
    //  kern_printf("(a%d)",lev->cbs_dline[p].tv_sec*1000000+lev->cbs_dline[p].tv_nsec/1000);
    if (oldx > x) kern_raise(XUNSPECIFIED_EXCEPTION, exec_shadow);
    if (x<640)
      grx_plot(x, 15, 8);
  }
#endif

  /* and, finally, we reinsert the task in the master level */
  job_task_default_model(job, lev->cbs_dline[p]);
  job_task_def_noexc(job);
  level_table[ lev->scheduling_level ]->
    private_insert(lev->scheduling_level, p, (TASK_MODEL *)&job);
}
 
/* Budget replenishment (rule 4): while the budget is exhausted,
   postpone the deadline by one period and add one wcet of budget. */
static void CBS_avail_time_check(CBS_level_des *lev, PID p)
{
  /* there is a while because if the wcet is << than the system tick
     we may need to postpone the deadline many times */
  while (proc_table[p].avail_time <= 0) {
    ADDUSEC2TIMESPEC(lev->period[p], &lev->cbs_dline[p]);
    proc_table[p].avail_time += proc_table[p].wcet;

#ifdef TESTG
    if (starttime && p == 3) {
      oldx = x;
      x = ((lev->cbs_dline[p].tv_sec*1000000+lev->cbs_dline[p].tv_nsec/1000)/5000 - starttime) + 20;
      //    kern_printf("(e%d avail%d)",lev->cbs_dline[p].tv_sec*1000000+lev->cbs_dline[p].tv_nsec/1000,proc_table[p].avail_time);
      if (oldx > x) kern_raise(XUNSPECIFIED_EXCEPTION, exec_shadow);
      if (x<640)
        grx_plot(x, 15, 2);
    }
#endif
  }
}
 
 
/* this is the periodic reactivation of the task... it is posted only
   if the task is a periodic task */
static void CBS_timer_reactivate(void *par)
{
  PID p = (PID) par;
  CBS_level_des *lev;

  lev = (CBS_level_des *)level_table[proc_table[p].task_level];

#ifdef CBS_COUNTER
  if (p==5) cbs_counter++;
#endif

  /* a sleep request takes effect here: the idle task really goes
     to sleep, its server state is reset, and no new event is posted */
  if (lev->flag[p] & CBS_SLEEP && proc_table[p].status == CBS_IDLE) {
    proc_table[p].status = SLEEP;
    proc_table[p].avail_time = proc_table[p].wcet;
    NULL_TIMESPEC(&lev->cbs_dline[p]);
    return;
  }

  if (proc_table[p].status == CBS_IDLE) {
    /* the task has finished the current activation and must be
       reactivated */
    CBS_activation(lev,p,&lev->reactivation_time[p]);

    event_need_reschedule();
  }
  else if (lev->flag[p] & CBS_SAVE_ARRIVALS)
    /* the task has not completed the current activation, so we save
       the activation incrementing nact... */
    lev->nact[p]++;

  /* repost the event at the next period end... */
  ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
  lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
                                               CBS_timer_reactivate,
                                               (void *)p);
#ifdef CBS_COUNTER
  if (p==5) cbs_counter2++;
#endif
  /* tracer stuff */
  TRACER_LOGEVENT(FTrace_EVT_task_timer,(unsigned short int)proc_table[p].context,(unsigned int)proc_table[p].task_level);

}
 
/*+ Zombie-timer handler: fired when a killed or ended task finally
    reaches its period end; recycles the descriptor and gives the
    server bandwidth back. +*/
static void CBS_timer_zombie(void *par)
{
  PID p = (PID) par;
  CBS_level_des *lev =
    (CBS_level_des *)level_table[proc_table[p].task_level];

  /* the descriptor goes back to the free queue... */
  proc_table[p].status = FREE;
  iq_insertfirst(p,&freedesc);

  /* ...and the reserved bandwidth is released */
  lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet;
}
 
 
/* On-line admission test; installed only when CBS_ENABLE_GUARANTEE is
   set at registration time.  Consumes this level's reserved
   utilization from the system-wide free bandwidth. */
static int CBS_public_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);

  /* not enough spare bandwidth for this level: refuse */
  if (*freebandwidth < lev->U)
    return 0;

  *freebandwidth -= lev->U;
  return 1;
}
 
/* Accept a SOFT_TASK_MODEL at this level: validate the model, reserve
   its bandwidth (when the on-line guarantee is enabled) and set up the
   per-task server state.
   Returns 0 on success, -1 if the model is unacceptable or the
   requested bandwidth does not fit. */
static int CBS_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);
  SOFT_TASK_MODEL *soft;

  /* only soft-task models addressed to this level are accepted */
  if (m->pclass != SOFT_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;
  soft = (SOFT_TASK_MODEL *)m;
  /* a CBS server needs both a budget (met) and a period */
  if (!(soft->met && soft->period)) return -1;

  if (lev->flags & CBS_ENABLE_GUARANTEE) {
    bandwidth_t b;
    b = (MAX_BANDWIDTH / soft->period) * soft->met;

    /* really update lev->U, checking for overflow first... */
    if (MAX_BANDWIDTH - lev->U > b)
      lev->U += b;
    else
      return -1;
  }

  /* Enable the wcet check: the met is used as the server budget */
  proc_table[p].avail_time = soft->met;
  proc_table[p].wcet       = soft->met;
  proc_table[p].control   |= CONTROL_CAP;

  lev->nact[p]   = 0;
  lev->period[p] = soft->period;
  NULL_TIMESPEC(&lev->cbs_dline[p]);

  if (soft->periodicity == APERIODIC)
    lev->flag[p] = CBS_APERIODIC;
  else
    lev->flag[p] = 0;

  if (soft->arrivals == SAVE_ARRIVALS)
    lev->flag[p] |= CBS_SAVE_ARRIVALS;

  return 0; /* OK, also if the task cannot be guaranteed... */
}
 
static void CBS_public_detach(LEVEL l, PID p)
{
  /* the CBS level doesn't introduce any dynamically allocated field;
     we only have to give back the bandwidth that was reserved at
     creation time (the met is kept in wcet) */

  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);

  if (lev->flags & CBS_ENABLE_GUARANTEE) {
    lev->U -= (MAX_BANDWIDTH / lev->period[p]) * proc_table[p].wcet;
  }
}
 
/* Check that the scheduling choice made by the master level is still
   valid for task p; returns 0 if it is, -1 to force a reschedule. */
static int CBS_public_eligible(LEVEL l, PID p)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);
  JOB_TASK_MODEL job;

  /* we have to check if the deadline and the wcet are correct...
     if the CBS level schedules in background with respect to other
     levels, there can be the case in which a task is scheduled with
     schedule_time > CBS_deadline; in this case (not covered in the
     CBS article, because with only the standard scheduling policy it
     never happens) we reassign the deadline */

  if ( TIMESPEC_A_LT_B(&lev->cbs_dline[p], &schedule_time) ) {
    /* we kill the current activation */
    level_table[ lev->scheduling_level ]->
      private_extract(lev->scheduling_level, p);

    /* new deadline = schedule_time + period ... */
    TIMESPEC_ASSIGN(&lev->cbs_dline[p], &schedule_time);
    ADDUSEC2TIMESPEC(lev->period[p], &lev->cbs_dline[p]);

    /* and the budget is refilled */
    proc_table[p].avail_time = proc_table[p].wcet;
    /* and, finally, we reinsert the task in the master level with
       the new absolute deadline */
    job_task_default_model(job, lev->cbs_dline[p]);
    job_task_def_noexc(job);
    level_table[ lev->scheduling_level ]->
      private_insert(lev->scheduling_level, p, (TASK_MODEL *)&job);

    return -1;
  }

  return 0;
}
 
/* Dispatching is delegated entirely to the master scheduling level. */
static void CBS_public_dispatch(LEVEL l, PID p, int nostop)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);
  LEVEL master = lev->scheduling_level;

  level_table[master]->private_dispatch(master, p, nostop);
}
 
/* Called when task p stops executing; decides whether it was
   preempted (stay ready) or exhausted its budget (postpone deadline,
   refill, and reinsert — plain CBS is a soft reservation). */
static void CBS_public_epilogue(LEVEL l, PID p)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);
  JOB_TASK_MODEL job;

  /* check if the budget (wcet) is exhausted... */
  if ( proc_table[p].avail_time <= 0) {
    /* we kill the current activation */
    level_table[ lev->scheduling_level ]->
      private_extract(lev->scheduling_level, p);

    /* we modify the deadline according to rule 4 ... */
    CBS_avail_time_check(lev, p);

    /* and, finally, we reinsert the task in the master level with
       the postponed deadline */
    job_task_default_model(job, lev->cbs_dline[p]);
    job_task_def_noexc(job);
    level_table[ lev->scheduling_level ]->
      private_insert(lev->scheduling_level, p, (TASK_MODEL *)&job);
    //    kern_printf("epil : dl %d per %d p %d |\n",
    //          lev->cbs_dline[p].tv_nsec/1000,lev->period[p],p);

  }
  else
    /* the task has been preempted.  It returns into the ready queue
       through the master level's epilogue... */
    level_table[ lev->scheduling_level ]->
      private_epilogue(lev->scheduling_level,p);
}
 
/* Activate task p at time t: clear a pending sleep request, save the
   arrival if the task is already active, otherwise start a new server
   instance and (for periodic tasks) arm the reactivation timer. */
static void CBS_public_activate(LEVEL l, PID p, struct timespec *t)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);

  /* a pending sleep request is cancelled by a new activation */
  if (lev->flag[p] & CBS_SLEEP) {
    lev->flag[p] &= ~CBS_SLEEP;
    if (proc_table[p].status != SLEEP) return;
  }

  /* save the activation (only if needed...) */
  if (proc_table[p].status != SLEEP) {
    if (lev->flag[p] & CBS_SAVE_ARRIVALS)
      lev->nact[p]++;
    return;
  }

  CBS_activation(lev, p, t);

  /* Set the reactivation timer */
  if (!(lev->flag[p] & CBS_APERIODIC))
  {
    /* we cannot use the deadline computed by CBS_activation because
       the deadline may be != from actual_time + period
       (if we call the task_activate after a task_sleep, and the
       deadline was postponed a lot...) */
    TIMESPEC_ASSIGN(&lev->reactivation_time[p], t);
    ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
    //    TIMESPEC_ASSIGN(&lev->reactivation_time[p], &lev->cbs_dline[p]);
    lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
                                                 CBS_timer_reactivate,
                                                 (void *)p);
#ifdef CBS_COUNTER
    if (p==5) cbs_counter2++;
#endif
  }
  //  kern_printf("act : %d %d |",lev->cbs_dline[p].tv_nsec/1000,p);
}
 
/* A task resuming after a block is handled as a fresh server
   activation at the current time. */
static void CBS_public_unblock(LEVEL l, PID p)
{
  struct timespec now;
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);

  kern_gettime(&now);
  CBS_activation(lev, p, &now);
}
 
/* Task p blocks: postpone the deadline if the budget ran out, then
   remove it from the master level's ready queue. */
static void CBS_public_block(LEVEL l, PID p)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);
  LEVEL master = lev->scheduling_level;

  CBS_avail_time_check(lev, p);

  level_table[master]->private_extract(master, p);
}
 
/* Message entry point.  m == NULL means "end of the current job";
   m == 1 means "sleep at the next job end". */
static int CBS_public_message(LEVEL l, PID p, void *m)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);

  switch((long)(m)) {

    case (long)(NULL):

      /* postpone the deadline if the budget is exhausted... */
      CBS_avail_time_check(lev, p);

      if (lev->nact[p]) {
        /* pending activations: start the next job immediately */
        lev->nact[p]--;
        level_table[ lev->scheduling_level ]->
          private_epilogue(lev->scheduling_level,p);
      } else {
        level_table[ lev->scheduling_level ]->
          private_extract(lev->scheduling_level,p);

        if (lev->flag[p] & CBS_APERIODIC)
          proc_table[p].status = SLEEP;
        else /* the task is soft-periodic: wait for the next period */
          proc_table[p].status = CBS_IDLE;
      }

      jet_update_endcycle(); /* Update the Jet data... */
      TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);

      break;

    case 1:

      /* ask for a sleep: honored by CBS_timer_reactivate */
      lev->flag[p] |= CBS_SLEEP;
      TRACER_LOGEVENT(FTrace_EVT_task_disable,(unsigned short int)proc_table[p].context,(unsigned int)l);

      break;

  }

  return 0;

}
 
/* Task p ends (or is killed): cancel any periodic reactivation event
   and post the zombie event that will free the descriptor and the
   bandwidth at the current server deadline (see CBS_timer_zombie). */
static void CBS_public_end(LEVEL l, PID p)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);

  /* postpone the deadline if the budget is exhausted... */
  CBS_avail_time_check(lev, p);

  level_table[ lev->scheduling_level ]->
    private_extract(lev->scheduling_level,p);

  /* we delete the reactivation timer (only periodic tasks have one) */
  if (!(lev->flag[p] & CBS_APERIODIC)) {
    kern_event_delete(lev->reactivation_timer[p]);
    lev->reactivation_timer[p] = -1;
  }

  /* Finally, we post the zombie event.  When the period end is
     reached, the task descriptor and bandwidth are freed */
  proc_table[p].status = CBS_ZOMBIE;
  lev->reactivation_timer[p] = kern_event_post(&lev->cbs_dline[p],
                                               CBS_timer_zombie,
                                               (void *)p);
}
 
/* Registration functions */
 
/*+ Registration function:
    int flags    the init flags (see cbs.h)
    LEVEL master the master level the CBS tasks are inserted into
    Returns the level number at which the module has been registered. +*/
LEVEL CBS_register_level(int flags, LEVEL master)
{
  LEVEL l;            /* the level that we register */
  CBS_level_des *lev;  /* for readability only */
  PID i;              /* a counter */

  printk("CBS_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor(sizeof(CBS_level_des));

  lev = (CBS_level_des *)level_table[l];

  /* fill the standard descriptor; the guarantee hook is installed
     only when the on-line guarantee is requested */
  if (flags & CBS_ENABLE_GUARANTEE)
    lev->l.public_guarantee = CBS_public_guarantee;
  else
    lev->l.public_guarantee = NULL;
  lev->l.public_create    = CBS_public_create;
  lev->l.public_detach    = CBS_public_detach;
  lev->l.public_end       = CBS_public_end;
  lev->l.public_eligible  = CBS_public_eligible;
  lev->l.public_dispatch  = CBS_public_dispatch;
  lev->l.public_epilogue  = CBS_public_epilogue;
  lev->l.public_activate  = CBS_public_activate;
  lev->l.public_unblock   = CBS_public_unblock;
  lev->l.public_block     = CBS_public_block;
  lev->l.public_message   = CBS_public_message;

  /* fill the CBS descriptor part: all per-task slots start empty */
  for (i=0; i<MAX_PROC; i++) {
     NULL_TIMESPEC(&lev->cbs_dline[i]);
     lev->period[i] = 0;
     NULL_TIMESPEC(&lev->reactivation_time[i]);
     lev->reactivation_timer[i] = -1;
     lev->nact[i] = 0;
     lev->flag[i] = 0;
  }


  lev->U = 0;

  lev->scheduling_level = master;

  lev->flags = flags;

  return l;
}
 
/*+ Returns the bandwidth currently reserved by this CBS level +*/
bandwidth_t CBS_usedbandwidth(LEVEL l)
{
  return ((CBS_level_des *)(level_table[l]))->U;
}
 
/*+ Returns the number of pending activations of task p.
    No check is made that p really is a CBS task! +*/
int CBS_get_nact(LEVEL l, PID p)
{
  return ((CBS_level_des *)(level_table[l]))->nact[p];
}
 
/shark/trunk/modules/cbs/subdir.mk
0,0 → 1,0
OBJS += cbs/cbs.o
/shark/trunk/modules/pi/pi/pi.h
0,0 → 1,100
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: pi.h,v 1.1 2005-02-25 10:55:09 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:55:09 $
------------
 
This file contains the Priority Inheritance (PI) Protocol
 
Title:
PI (Priority Inheritance protocol)
 
Resource Models Accepted:
None
 
Description:
This module implement the Priority Inheritance Protocol.
The priority inheritance is made using the shadow field of the
task descriptor. No difference is made upon the task model of the
tasks that use PI mutexes.
 
A PI mutex is created passing the PI_mutexattr structure to mutex_init.
 
Exceptions raised:
XMUTEX_OWNER_KILLED
This exception is raised when a task ends and it owns one or more
mutexes
 
Restrictions & special features:
- This module is NOT Posix compliant
- This module can manage any number of PI mutexes.
- If a task ends (because it reaches the end of the body or because it
is killed by someone) and it owns some mutex, an exception is raised.
- if a mutex unlock is called on a mutex not previously
locked or previously locked by another task an exception is raised
- A PI mutex can be statically allocated. To do this, the init function
have to define a macro that puts this information in the mutex
descriptor: mutexlevel = <PI resource level>; opt = NULL;
for example, if the PI module is registered at level 1, the macro is
like:
#define MUTEX_INITIALIZER {1,NULL}
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
 
#ifndef __PI_H__
#define __PI_H__
 
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
RLEVEL PI_register_module(void);
 
__END_DECLS
#endif
/shark/trunk/modules/pi/pi.c
0,0 → 1,338
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: pi.c,v 1.1 2005-02-25 10:55:09 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:55:09 $
------------
 
 Priority Inheritance protocol. See pi.h for more details...
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <pi/pi/pi.h>
 
#include <ll/ll.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
/* The PI resource level descriptor */
typedef struct {
  mutex_resource_des m;  /*+ the mutex interface +*/

  int nlocked[MAX_PROC]; /*+ how many mutexes each task currently holds;
                             checked at detach time (PI_res_detach) +*/

  PID blocked[MAX_PROC]; /*+ per-task link of the singly linked list of
                             tasks blocked on a mutex (see PI_lock) +*/
} PI_mutex_resource_des;


/* this is the structure normally pointed by the opt field in the
   mutex_t structure */
typedef struct {
  PID owner;        /* task owning the mutex, NIL if free */
  int nblocked;     /* number of tasks currently blocked on this mutex */
  PID firstblocked; /* head of the blocked list (links are in blocked[]) */
} PI_mutex_t;
 
 
 
#if 0
/*+ print resource protocol statistics...+*/
/* (compiled out) prints, for each task, how many PI mutexes it holds */
static void PI_resource_status(RLEVEL r)
{
  PI_mutex_resource_des *m = (PI_mutex_resource_des *)(resource_table[r]);
  PID i;

  kern_printf("Resources owned by the tasks:\n");
  for (i=0; i<MAX_PROC; i++) {
    kern_printf("%-4d", m->nlocked[i]);
  }
}
#endif
 
static int PI_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  /* PI needs no per-task resource parameters, so every resource
     model is refused */
  return -1;
}
 
static void PI_res_detach(RLEVEL l, PID p)
{
  PI_mutex_resource_des *lev = (PI_mutex_resource_des *)(resource_table[l]);

  /* a task must not die while it still holds a PI mutex */
  if (lev->nlocked[p] != 0)
    kern_raise(XMUTEX_OWNER_KILLED, p);
}
 
/* Create the PI-specific descriptor for a mutex.
   Returns 0 on success, -1 on a wrong attribute class, ENOMEM when
   the descriptor cannot be allocated.  Note: no check is made against
   initializing a non-destroyed mutex. */
static int PI_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  PI_mutex_t *mut;

  /* only PI attributes are accepted */
  if (a->mclass != PI_MCLASS)
    return -1;

  mut = (PI_mutex_t *)kern_alloc(sizeof(PI_mutex_t));
  if (mut == NULL)
    return (ENOMEM);

  /* a fresh mutex is unowned with an empty blocked list */
  mut->owner        = NIL;
  mut->nblocked     = 0;
  mut->firstblocked = NIL;

  m->mutexlevel = l;
  m->opt        = (void *)mut;

  return 0;
}
 
 
/* Destroy a PI mutex: frees its descriptor unless tasks are still
   blocked on it (EBUSY in that case). */
static int PI_destroy(RLEVEL l, mutex_t *m)
{
  SYS_FLAGS f;

  if (((PI_mutex_t *)m->opt)->nblocked != 0)
    return (EBUSY);

  f = kern_fsave();
  if (m->opt != NULL) {
    kern_free(m->opt, sizeof(PI_mutex_t));
    m->opt = NULL;
  }
  kern_frestore(f);

  return 0;
}
 
/* Note that in this approach, when unlocking we can't wake up only one
   thread: we have to wake up all the blocked threads, because there is
   no concept of priority between the tasks here.  Each woken thread
   then retests the condition.
   Normally they retest it only once: if many threads are unblocked they
   are scheduled according to their priority (unknown in this module!),
   and if the time slice is greater than the critical section they never
   block again. */
static int PI_lock(RLEVEL l, mutex_t *m)
{
  PI_mutex_resource_des *lev = (PI_mutex_resource_des *)(resource_table[l]);
  PI_mutex_t *p;
  SYS_FLAGS f;

  f = kern_fsave();

  p = (PI_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    PI_mutexattr_t a;
    PI_mutexattr_default(a);
    PI_init(l, m, &a);

    /* BUG FIX: PI_init() stores the new descriptor in m->opt, but the
       original code kept dereferencing the NULL pointer read above.
       Reload it, and bail out if the allocation inside PI_init()
       failed. */
    p = (PI_mutex_t *)m->opt;
    if (!p) {
      kern_frestore(f);
      return (ENOMEM);
    }
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_frestore(f);
    return (EDEADLK);
  }

  while (p->owner != NIL) {
    /* the mutex is locked by someone: "block" the task by pointing its
       shadow at the owner (this is what implements the inheritance) */
    proc_table[exec_shadow].shadow = p->owner;

    /* push the task onto the mutex blocked list */
    lev->blocked[exec_shadow] = p->firstblocked;
    p->firstblocked = exec_shadow;
    p->nblocked++;

    /* ... call the scheduler... */
    scheduler();
    TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
    kern_context_load(proc_table[exec_shadow].context);

    /* ... and reacquire the cli() before retesting the condition */
    kern_cli();
  }

  /* the mutex is free, we can lock it! */
  lev->nlocked[exec_shadow]++;
  p->owner = exec_shadow;

  kern_frestore(f);

  return 0;
}
 
/* Non-blocking lock: EBUSY when the mutex is owned, 0 when acquired. */
static int PI_trylock(RLEVEL l, mutex_t *m)
{
  PI_mutex_t *p;
  SYS_FLAGS f;

  f = kern_fsave();

  p = (PI_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    PI_mutexattr_t a;
    PI_mutexattr_default(a);
    PI_init(l, m, &a);

    /* BUG FIX: reload the descriptor created by PI_init(); the
       original code went on dereferencing the NULL pointer read
       above.  Also handle a failed allocation. */
    p = (PI_mutex_t *)m->opt;
    if (!p) {
      kern_frestore(f);
      return (ENOMEM);
    }
  }

  if (p->owner != NIL) {
    /* a task already owns the mutex */
    kern_frestore(f);
    return (EBUSY);
  }
  else {
    /* the mutex is free: lock it */
    PI_mutex_resource_des *lev = (PI_mutex_resource_des *)(resource_table[l]);
    lev->nlocked[exec_shadow]++;

    p->owner = exec_shadow;

    kern_frestore(f);
    return 0;
  }
}
 
/* Unlock a PI mutex.  All blocked tasks are woken up (see the comment
   above PI_lock): each one gets its shadow pointer reset, which undoes
   the priority inheritance, and will retest the mutex state.
   Returns EINVAL for an uninitialized mutex, EPERM if the caller is
   not the owner. */
static int PI_unlock(RLEVEL l, mutex_t *m)
{
  PI_mutex_resource_des *lev;
  PI_mutex_t *p;
  int i, j;

  // return 0;
  p = (PI_mutex_t *)m->opt;
  if (!p)
    return (EINVAL);

  if (p->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    kern_sti();
    return (EPERM);
  }

  proc_table[exec_shadow].context = kern_context_save();

  /* the mutex is mine */
  lev = (PI_mutex_resource_des *)(resource_table[l]);
  lev->nlocked[exec_shadow]--;

  p->owner = NIL;

  /* we unblock all the waiting tasks...
     walking the list head (firstblocked) through the blocked[] links;
     resetting shadow to the task itself removes the inheritance */
  i = p->firstblocked;
  p->firstblocked = NIL;

  while (i != NIL) {
    // kern_printf("<<%d>>", i);
    proc_table[i].shadow = j = i;
    i = lev->blocked[i];
    lev->blocked[j] = NIL;
  }
  p->nblocked = 0;

/* {
   int xxx;
   kern_printf("(PI_unlock owner=%d ",p->owner);
   for (xxx = 0; xxx<5; xxx++) kern_printf("p%d s%d|",xxx, proc_table[xxx].shadow);
   kern_printf(")\n");
   }*/

  /* reschedule: one of the woken tasks may run now */
  scheduler();
  TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}
 
/* Register the Priority Inheritance module: allocates a resource
   level, installs the mutex interface and clears the per-task state.
   Returns the new resource level index. */
RLEVEL PI_register_module(void)
{
  RLEVEL lev;                   /* the level that we register */
  PI_mutex_resource_des *des;   /* for readability only */
  PID pid;

  printk("PI_register_module\n");

  /* get a slot in the resource table and allocate the descriptor */
  lev = resource_alloc_descriptor();
  des = (PI_mutex_resource_des *)kern_alloc(sizeof(PI_mutex_resource_des));
  resource_table[lev] = (resource_des *)des;

  /* generic resource interface */
  des->m.r.rtype        = MUTEX_RTYPE;
  des->m.r.res_register = PI_res_register;
  des->m.r.res_detach   = PI_res_detach;

  /* mutex interface */
  des->m.init    = PI_init;
  des->m.destroy = PI_destroy;
  des->m.lock    = PI_lock;
  des->m.trylock = PI_trylock;
  des->m.unlock  = PI_unlock;

  /* PI-specific per-task state: nothing locked, nobody blocked */
  for (pid = 0; pid < MAX_PROC; pid++) {
    des->nlocked[pid] = 0;
    des->blocked[pid] = NIL;
  }

  return lev;
}
 
/shark/trunk/modules/pi/subdir.mk
0,0 → 1,0
OBJS += pi/pi.o
/shark/trunk/modules/cabs/cabs/cabs.h
0,0 → 1,93
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: cabs.h,v 1.1 2005-02-25 10:53:41 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:53:41 $
------------
 
This file contains the Hartik 3.3.1 CAB functions
 
 
Author: Gerardo Lamastra
Giuseppe Lipari
Date: 9/5/96
 
File: Cabs.H
Revision: 1.1
Date: 14/3/97
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
/* $HEADER+ */
 
#ifndef __CAB_H__
#define __CAB_H__
 
#include <ll/sys/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
#define MAX_CAB_NAME 10 /*+ Max. n. of chars for a CAB +*/
 
#define MAX_CAB 50U /*+ Maximum number of CABs +*/
 
 
typedef int CAB;
 
/*+ This function must be inserted in the __hartik_register_levels__ +*/
void CABS_register_module(void);
 
/* User level CAB primitives */
CAB cab_create(char *name, int dim_mes, BYTE num_mes);
char *cab_reserve(CAB id);
int cab_putmes(CAB id, void *pbuf);
char *cab_getmes(CAB id);
int cab_unget(CAB id, void *pun_mes);
void cab_delete(CAB id);
 
__END_DECLS
#endif /* __CAB_H__ */
 
/shark/trunk/modules/cabs/cabs.c
0,0 → 1,315
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: cabs.c,v 1.1 2005-02-25 10:53:41 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:53:41 $
------------
 
Date: 2/7/96
 
File: Cabs.C
Translated by : Giuseppe Lipari
Revision: 1.1
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <cabs/cabs/cabs.h>
 
#include <kernel/config.h>
#include <ll/ll.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <errno.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/* A "cab" holds a set of buffers used to exchange messages.
   Each "cab_data" is a buffer header carrying bookkeeping info;
   the message itself starts right after the header, at
   ((struct cab_data *)hdr + 1). */

struct cab_data {          /* CAB buffer header */
  struct cab_data *next;   /* next buffer of this CAB */
  unsigned short n_uso;    /* number of tasks using this buffer */
};

struct cab_desc {               /* CAB descriptor */
  char name[MAX_CAB_NAME+1];    /* CAB name */
  CAB next_cab_free;            /* index of the next free CAB descriptor */
  BYTE busy;                    /* CAB free/busy flag */
  char *mem_cab;                /* CAB global memory area */
  BYTE n_buf;                   /* number of buffers in the CAB */
  BYTE nfree;                   /* number of free buffers */
  unsigned dim_mes;             /* message size */
  struct cab_data *free;        /* pointer to the first free buffer */
  struct cab_data *mrd;         /* pointer to the most_recent_data buffer */
};

static struct cab_desc cabs[MAX_CAB]; /* CAB descriptor table */
static CAB free_cab;                  /* index of the first free CAB */
 
/* Validate a CAB identifier: returns TRUE if the CAB is open,
   otherwise -1 with errno set to ECAB_INVALID_ID or ECAB_CLOSED. */
static int checkcab(CAB id)
{
  if (id >= MAX_CAB) {
    errno = ECAB_INVALID_ID;
    return -1;
  }
  if (cabs[id].busy != TRUE) {
    errno = ECAB_CLOSED;
    return -1;
  }
  return TRUE;
}
 
/*----------------------------------------------------------------------*/
/* CABS_register_module -- initializes the CAB structures               */
/*----------------------------------------------------------------------*/
void CABS_register_module(void)
{
  int i;

  /* chain every descriptor into the free list (NIL-terminated)
     and mark it unused */
  for (i = 0; i < MAX_CAB; i++) {
    cabs[i].busy = FALSE;
    cabs[i].next_cab_free = (i == MAX_CAB - 1) ? NIL : i + 1;
  }
  free_cab = 0;
  // for (i = CAB_INVALID_MSG_NUM; i <= CAB_CLOSED; i++)
  //   exc_set(i,cab_exception);
}
 
/*----------------------------------------------------------------------*/
/* cab_create -- creates and initializes a CAB, returns its index       */
/*               (-1 with errno set on failure)                         */
/*----------------------------------------------------------------------*/
CAB cab_create(char *name, int dim_mes, BYTE num_mes)
{
  CAB id;               /* index of the CAB to return */
  struct cab_desc *pid; /* pointer to the CAB (speeds up access) */
  char *mem;            /* scratch pointer into the buffer memory */
  struct cab_data *tmp; /* scan pointer over the buffer list */
  int i;
  SYS_FLAGS f;

  f = kern_fsave();

  /* a CAB must contain at least one message buffer */
  if (num_mes < 1) {
    errno = ECAB_INVALID_MSG_NUM;
    kern_frestore(f);
    return -1;
  }

  /* BUG FIX: the free list is NIL-terminated (see
     CABS_register_module), not MAX_CAB-terminated; the old test
     "free_cab != MAX_CAB" never failed and indexed cabs[NIL] when
     the table was exhausted */
  if ((id = free_cab) != NIL) {
    pid = &cabs[id];                 /* grab the descriptor */
    free_cab = pid->next_cab_free;
  }
  else {
    errno = ECAB_NO_MORE_ENTRY;
    kern_frestore(f);
    return -1;
  }

  /* allocate num_mes (header + message) slots in one chunk */
  mem = kern_alloc((dim_mes + sizeof(struct cab_data)) * num_mes);
  if (!mem) {
    /* BUG FIX: on allocation failure put the descriptor back on the
       free list instead of writing through a NULL buffer area */
    pid->next_cab_free = free_cab;
    free_cab = id;
    errno = ENOMEM;
    kern_frestore(f);
    return -1;
  }

  kern_frestore(f);

  /* initialize the CAB descriptor */
  strcpy(pid->name, name);
  pid->mem_cab = mem;
  pid->dim_mes = dim_mes;
  pid->n_buf = num_mes;

  /* initialize the first message and the free-buffer list:
     chain all the buffers, the first one becomes the mrd */
  pid->mrd = (struct cab_data *)mem;
  i = (int)num_mes;
  tmp = NULL;
  while (i--) {
    tmp = (struct cab_data *)mem;
    mem += sizeof(struct cab_data) + dim_mes;
    tmp->next = (struct cab_data *)mem;
    tmp->n_uso = 0;
  }
  tmp->next = NULL;                 /* last buffer closes the chain */
  pid->free = pid->mrd->next;

  /* the initial most-recent message is all zeroes */
  mem = (char *)(pid->mrd + 1);
  for (i=0; i<dim_mes; i++) *(mem++) = 0;
  pid->nfree = num_mes - 1;

  f = kern_fsave();
  pid->busy = TRUE;                 /* publish the CAB */
  kern_frestore(f);

  return(id);
}
 
/*----------------------------------------------------------------------*/
/* cab_reserve -- gets a free buffer where a new message can be         */
/*                written; returns a pointer to the message area,       */
/*                or NULL with errno set                                */
/*----------------------------------------------------------------------*/
char *cab_reserve(CAB id)
{
  struct cab_desc *pid;
  char *buf;
  SYS_FLAGS f;

  /* check the CAB identifier */
  if (checkcab(id) == -1) return(NULL);

  pid = &cabs[id];
  f = kern_fsave();

  /* BUG FIX: the old code used "if ((pid->nfree)--)", which also
     decremented the counter on the failure path; nfree is a BYTE,
     so it wrapped from 0 to 255 and corrupted the free count */
  if (pid->nfree > 0) {
    pid->nfree--;
    buf = (char *)(pid->free + 1);  /* message area follows the header */
    pid->free = pid->free->next;
    kern_frestore(f);
    return(buf);
  }
  else {
    errno = ECAB_TOO_MUCH_MSG;
    kern_frestore(f);
    return(NULL);
  }
}
 
/*----------------------------------------------------------------------*/
/* cab_putmes -- publishes a new message in the CAB                     */
/*----------------------------------------------------------------------*/
int cab_putmes(CAB id, void *pbuf)
{
  struct cab_desc *pid;
  struct cab_data *prev;
  SYS_FLAGS f;

  if (checkcab(id) == -1)
    return -1;
  pid = &cabs[id];

  f = kern_fsave();

  /* recycle the previous most-recent buffer if nobody is reading it */
  prev = pid->mrd;
  if (prev->n_uso == 0) {
    prev->next = pid->free;
    pid->free  = prev;
    pid->nfree++;
  }

  /* step back from the message area to its header: it becomes
     the new most-recent data */
  pid->mrd = ((struct cab_data *)pbuf) - 1;

  kern_frestore(f);
  return 1;
}
 
/*----------------------------------------------------------------------*/
/* cab_getmes -- gets the most recent message in the CAB;               */
/*               returns a pointer to its message area                  */
/*----------------------------------------------------------------------*/
char *cab_getmes(CAB id)
{
  char *msg;
  SYS_FLAGS f;

  if (checkcab(id) == -1)
    return(NULL);

  f = kern_fsave();

  /* register one more reader on the most-recent buffer, then hand
     out the message area that follows the buffer header */
  cabs[id].mrd->n_uso++;
  msg = (char *)(cabs[id].mrd + 1);

  kern_frestore(f);
  return(msg);
}
 
/*----------------------------------------------------------------------*/
/* cab_unget -- signals that the task no longer uses the message;       */
/*              the buffer is released when nobody else uses it and     */
/*              it is not the most recent data                          */
/*----------------------------------------------------------------------*/
int cab_unget(CAB id, void *pun_mes)
{
  struct cab_data *buf;
  struct cab_desc *pid;
  SYS_FLAGS f;

  if (checkcab(id) == -1)
    return -1;
  pid = &cabs[id];

  f = kern_fsave();

  /* step back from the message area to its header */
  buf = ((struct cab_data *)pun_mes) - 1;

  /* drop one reader; free the buffer when it is unused, unless it is
     still the most recent message */
  buf->n_uso--;
  if (buf->n_uso == 0 && buf != pid->mrd) {
    buf->next = pid->free;
    pid->free = buf;
    pid->nfree++;
  }

  kern_frestore(f);
  return 1;
}
 
/*----------------------------------------------------------------------*/
/* cab_delete -- frees the CAB memory and releases the descriptor       */
/*----------------------------------------------------------------------*/
void cab_delete(CAB id)
{
  struct cab_desc *pid;
  SYS_FLAGS f;

  /* NOTE(review): id is not validated here (no checkcab call) —
     callers must pass an open CAB */
  pid = &cabs[id];

  f = kern_fsave();
  kern_free(pid->mem_cab,
            (pid->dim_mes + sizeof(struct cab_data)) * pid->n_buf);

  /* put the descriptor back at the head of the free list */
  pid->busy = FALSE;
  pid->next_cab_free = free_cab;
  free_cab = id;
  kern_frestore(f);
}
/shark/trunk/modules/cabs/subdir.mk
0,0 → 1,0
OBJS += cabs/cabs.o
/shark/trunk/modules/nopm/nopm/nopm.h
0,0 → 1,69
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: nopm.h,v 1.1 2005-02-25 10:55:09 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:55:09 $
------------
 
This file contains the No Protocol Multiple lock (NOPM) implementation
of mutexes.
It is like NOP (see nop.h) but the owner of the mutex can issue multiple
lock/unlock on mutex.
**/
 
/*
* Copyright (C) 2000 Massimiliano Giorgi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
 
#ifndef __NOPM_H__
#define __NOPM_H__
 
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
RLEVEL NOPM_register_module(void);
 
__END_DECLS
#endif
/shark/trunk/modules/nopm/nopm.c
0,0 → 1,355
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: nopm.c,v 1.1 2005-02-25 10:55:09 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:55:09 $
------------
 
See modules/nopm.h.
This code is a copy of nop.c with minor modifications.
**/
 
/*
* Copyright (C) 2000 Massimiliano Giorgi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <nopm/nopm/nopm.h>
 
#include <ll/ll.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/* The NOPM resource level descriptor */
typedef struct {
  mutex_resource_des m; /*+ the mutex interface +*/
} NOPM_mutex_resource_des;


/* this is the structure normally pointed by the opt field in the
   mutex_t structure */
typedef struct {
  PID owner;      /* task owning the mutex, NIL if free */
  IQUEUE blocked; /* queue of tasks blocked on the mutex */
  int counter;    /* recursion depth: how many times the owner locked it */
} NOPM_mutex_t;




/* debug registry: every initialized NOPM mutex is recorded here so
   dump_nopm_table() can print the blocked queues */




#define MAXTABLE 4096
static mutex_t *table[MAXTABLE];
static int index=0;
 
/* Record a mutex in the debug table used by dump_nopm_table().
   Returns 0 on success, -1 when the table is full. */
static int register_nopm(mutex_t *p)
{
  if (index >= MAXTABLE)
    return -1;
  table[index] = p;
  index++;
  return 0;
}
 
/* Debug helper: for every registered NOPM mutex that has waiters,
   print the owner and the list of blocked tasks. */
void dump_nopm_table(void)
{
  NOPM_mutex_t *ptr;
  SYS_FLAGS f;
  PID j;
  int i;

  f=kern_fsave();
  kern_printf("nopm_mutex module TABLE\n");
  kern_printf("----------------------\n");
  for(i=0;i<index;i++) {
    ptr=table[i]->opt;
    if (!iq_isempty(&ptr->blocked)) {
      /* walk the blocked queue, printing each waiting PID */
      kern_printf("%i blocks on 0x%p: ",ptr->owner,table[i]);
      j=iq_query_first(&ptr->blocked);
      while (j!=NIL) {
        kern_printf("%i ",(int)j);
        j=iq_query_next(j, &ptr->blocked);
      }
      kern_printf("\n");
    } else {
      //kern_printf("0x%p no block\n",table[i]);
    }
  }
  kern_frestore(f);

}
 
 
 
 
 
 
 
 
 
 
 
/* Wait status for this library */
#define NOPM_WAIT LIB_STATUS_BASE
 
 
 
static int NOPM_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  /* NOPM accepts no per-task resource models */
  return -1;
}
 
/* Nothing to do on task detach: NOPM keeps no per-task state.
   NOTE(review): unlike PI, a task dying while owning a NOPM mutex is
   not detected here — confirm this is intended. */
static void NOPM_res_detach(RLEVEL l, PID p)
{
}
 
/* Create the NOPM-specific descriptor for a mutex.
   Returns 0 on success, -1 on a wrong attribute class, ENOMEM when
   the descriptor cannot be allocated.  Note: no check is made against
   initializing a non-destroyed mutex. */
static int NOPM_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  NOPM_mutex_t *mut;

  if (a->mclass != NOPM_MCLASS)
    return -1;

  mut = (NOPM_mutex_t *)kern_alloc(sizeof(NOPM_mutex_t));
  if (mut == NULL)
    return (ENOMEM);

  /* a fresh mutex is unowned, with an empty queue and lock count 0 */
  mut->owner = NIL;
  iq_init(&mut->blocked, &freedesc, 0);
  mut->counter = 0;

  m->mutexlevel = l;
  m->opt = (void *)mut;

  /* MG: remember the mutex so dump_nopm_table() can find it */
  register_nopm(m);
  return 0;
}
 
 
/* Destroy a NOPM mutex: frees its descriptor unless it is currently
   owned (EBUSY in that case). */
static int NOPM_destroy(RLEVEL l, mutex_t *m)
{
  SYS_FLAGS f;

  if (((NOPM_mutex_t *)m->opt)->owner != NIL)
    return (EBUSY);

  f = kern_fsave();
  if (m->opt != NULL) {
    kern_free(m->opt, sizeof(NOPM_mutex_t));
    m->opt = NULL;
  }
  kern_frestore(f);

  return 0;
}
 
/* Lock a NOPM mutex.  Recursive locking by the owner just increments
   the counter; otherwise the caller blocks (status NOPM_WAIT) until
   NOPM_unlock hands the mutex over. */
static int NOPM_lock(RLEVEL l, mutex_t *m)
{
  NOPM_mutex_t *p;
  SYS_FLAGS f;

  f = kern_fsave();

  p = (NOPM_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    NOPM_mutexattr_t a;
    NOPM_mutexattr_default(a);
    NOPM_init(l, m, &a);

    /* BUG FIX: NOPM_init() stores the new descriptor in m->opt, but
       the original code kept dereferencing the NULL pointer read
       above.  Reload it, and bail out on allocation failure. */
    p = (NOPM_mutex_t *)m->opt;
    if (!p) {
      kern_frestore(f);
      return (ENOMEM);
    }
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex: recursive lock, count it */
    p->counter++;
    kern_frestore(f);
    return 0;
  }

  if (p->owner != NIL) { /* We must block exec task */
    LEVEL tl; /* the task's level (renamed: the original shadowed l) */

    proc_table[exec_shadow].context = kern_context_save();
    kern_epilogue_macro();

    tl = proc_table[exec_shadow].task_level;
    level_table[tl]->public_block(tl, exec_shadow);

    /* we insert the task in the semaphore queue */
    proc_table[exec_shadow].status = NOPM_WAIT;
    iq_insertlast(exec_shadow, &p->blocked);

    /* and finally we reschedule; ownership is transferred to us by
       NOPM_unlock before we run again */
    exec = exec_shadow = -1;
    scheduler();
    kern_context_load(proc_table[exec_shadow].context);
  }
  else {
    /* the mutex is free, We can lock it! */
    p->owner = exec_shadow;
    p->counter++;
    kern_frestore(f);
  }

  return 0;
}
 
/* Non-blocking lock: EBUSY when the mutex is owned (even by the
   caller itself — no recursive trylock), 0 when acquired. */
static int NOPM_trylock(RLEVEL l, mutex_t *m)
{
  NOPM_mutex_t *p;
  SYS_FLAGS f;

  f = kern_fsave();

  p = (NOPM_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    NOPM_mutexattr_t a;
    NOPM_mutexattr_default(a);
    NOPM_init(l, m, &a);

    /* BUG FIX: reload the descriptor created by NOPM_init(); the
       original code went on dereferencing the NULL pointer read
       above.  Also handle a failed allocation. */
    p = (NOPM_mutex_t *)m->opt;
    if (!p) {
      kern_frestore(f);
      return (ENOMEM);
    }
  }

  if (p->owner != NIL) {
    /* a task already owns the mutex */
    kern_frestore(f);
    return (EBUSY);
  }
  else {
    /* the mutex is free, We can lock it! */
    p->owner = exec_shadow;
    p->counter++;
    kern_frestore(f);
  }

  return 0;
}
 
/* Unlock a NOPM mutex.  A recursive lock just decrements the counter;
   the real release hands the mutex directly to the first waiting task
   (ownership transfer), or marks it free when the queue is empty.
   Returns EINVAL for an uninitialized mutex, EPERM for a non-owner. */
static int NOPM_unlock(RLEVEL l, mutex_t *m)
{
  NOPM_mutex_t *p;
  PID e;

  p = (NOPM_mutex_t *)m->opt;
  if (!p)
    return (EINVAL);

  if (p->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    kern_printf("wrongunlock<owner=%i,unlocker=%i>",p->owner,exec_shadow);
    kern_sti();
    return (EPERM);
  }

  p->counter--;
  if (p->counter!=0) {
    /* we have multiple lock on this mutex: not released yet */
    kern_sti();
    return 0;
  }
  proc_table[exec_shadow].context = kern_context_save();

  /* the mutex is mine, pop the first task to extract
     (skipping entries whose status is no longer NOPM_WAIT) */
  for (;;) {
    e = iq_getfirst(&p->blocked);
    if (e == NIL) {
      p->owner = NIL;
      break;
    } else if (proc_table[e].status == NOPM_WAIT) {
      /* NOTE(review): the parameter l (an RLEVEL) is reused here to
         hold the woken task's scheduling LEVEL */
      l = proc_table[e].task_level;
      level_table[l]->public_unblock(l,e);
      /* the woken task inherits one lock count */
      p->counter++;
      break;
    }
  }

  /* MG!!! new owner: the woken task, or NIL (e == NIL) if none */
  p->owner = e;

  scheduler();
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}
 
/* Register the NOPM module: allocates a resource level and installs
   the mutex interface.  Returns the new resource level index. */
RLEVEL NOPM_register_module(void)
{
  RLEVEL lev;                   /* the level that we register */
  NOPM_mutex_resource_des *des; /* for readability only */

  printk("NOPM_register_module\n");

  /* get a slot in the resource table and allocate the descriptor */
  lev = resource_alloc_descriptor();
  des = (NOPM_mutex_resource_des *)kern_alloc(sizeof(NOPM_mutex_resource_des));
  resource_table[lev] = (resource_des *)des;

  /* generic resource interface */
  des->m.r.rtype        = MUTEX_RTYPE;
  des->m.r.res_register = NOPM_res_register;
  des->m.r.res_detach   = NOPM_res_detach;

  /* mutex interface */
  des->m.init    = NOPM_init;
  des->m.destroy = NOPM_destroy;
  des->m.lock    = NOPM_lock;
  des->m.trylock = NOPM_trylock;
  des->m.unlock  = NOPM_unlock;

  return lev;
}
 
/shark/trunk/modules/nopm/subdir.mk
0,0 → 1,0
OBJS += nopm/nopm.o
/shark/trunk/modules/makefile
0,0 → 1,16
ifndef BASE
BASE=..
endif
 
include $(BASE)/config/config.mk
 
LIBRARY = modules
 
OBJS_PATH = $(BASE)/modules
 
include $(wildcard */subdir.mk)
 
OTHERINCL += -I$(BASE)/modules
 
include $(BASE)/config/lib.mk
 
/shark/trunk/modules/rm/rm.c
0,0 → 1,772
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors:
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* Anton Cervin
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: rm.c,v 1.1 2005-02-25 10:55:09 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:55:09 $
------------
 
This file contains the scheduling module RM (rate-/deadline-monotonic)
 
Read rm.h for further details.
 
**/
 
/*
* Copyright (C) 2000,2002 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <rm/rm/rm.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <tracer.h>
 
//#define RM_DEBUG
#define rm_printf kern_printf
#ifdef RM_DEBUG
/* Debug helpers: format the current time or a timespec as "sec.usec"
   into a static buffer.  ptime1/ptime2 are intentionally duplicated
   so two different times can appear in a single printf call (each
   function returns its own static buffer). */
char *pnow() {
  static char buf[40];
  struct timespec t;
  sys_gettime(&t);
  sprintf(buf, "%ld.%06ld", t.tv_sec, t.tv_nsec/1000);
  return buf;
}
char *ptime1(struct timespec *t) {
  static char buf[40];
  sprintf(buf, "%ld.%06ld", t->tv_sec, t->tv_nsec/1000);
  return buf;
}
char *ptime2(struct timespec *t) {
  static char buf[40];
  sprintf(buf, "%ld.%06ld", t->tv_sec, t->tv_nsec/1000);
  return buf;
}
#endif
 
/* Statuses used in the level */
#define RM_READY MODULE_STATUS_BASE /* ready */
#define RM_IDLE MODULE_STATUS_BASE+1 /* idle, waiting for offset/eop */
#define RM_WAIT MODULE_STATUS_BASE+2 /* to sleep, waiting for eop */
#define RM_ZOMBIE MODULE_STATUS_BASE+3 /* to free, waiting for eop */

/* Task flags */
#define RM_FLAG_SPORADIC 1 /* the task is sporadic */
#define RM_FLAG_SPOR_LATE 2 /* sporadic task with period overrun */


/* Task descriptor */
typedef struct {
  int flags; /* task flags */
  TIME period; /* period (or inter-arrival interval) */
  TIME rdeadline; /* relative deadline (set to the period when drel is 0) */
  TIME offset; /* release offset */
  struct timespec release; /* release time of current instance */
  struct timespec adeadline; /* latest assigned deadline */
  int dl_timer; /* deadline timer (-1 when not pending) */
  int eop_timer; /* end of period timer (-1 when not pending) */
  int off_timer; /* offset timer (-1 when not pending) */
  int dl_miss; /* deadline miss counter */
  int wcet_miss; /* WCET miss counter */
  int act_miss; /* activation miss counter */
  int nact; /* number of pending periodic jobs */
} RM_task_des;


/* Level descriptor */
typedef struct {
  level_des l; /* standard level descriptor */
  int flags; /* level flags (RM_ENABLE_* options) */
  IQUEUE ready; /* the ready queue */
  bandwidth_t U; /* used bandwidth */
  RM_task_des tvec[MAX_PROC]; /* vector of task descriptors */
} RM_level_des;


/* Module function cross-references */
static void RM_intern_release(PID p, RM_level_des *lev);
 
 
/**** Timer event handler functions ****/
 
/* This timer event handler is called at the end of the period.
   Depending on the task state it frees a zombie, puts a waiting task
   to sleep, marks a busy sporadic task as late, or releases the next
   instance of a periodic task. */
static void RM_timer_endperiod(void *par)
{
  PID p = (PID) par;
  RM_level_des *lev = (RM_level_des *)level_table[proc_table[p].task_level];
  RM_task_des *td = &lev->tvec[p];

  /* the timer has fired, so the stored handle is no longer valid */
  td->eop_timer = -1;

  if (proc_table[p].status == RM_ZOMBIE) {
    /* put the task in the FREE state */
    proc_table[p].status = FREE;
    iq_insertfirst(p,&freedesc);
    /* free the allocated bandwidth */
    lev->U -= (MAX_BANDWIDTH/td->rdeadline) * proc_table[p].wcet;
    return;
  }

  if (proc_table[p].status == RM_WAIT) {
    /* RM_WAIT means "waiting for end of period to sleep": do it now */
    proc_table[p].status = SLEEP;
    return;
  }
  if (td->flags & RM_FLAG_SPORADIC) {
    /* the task is sporadic and still busy, mark it as late */
    td->flags |= RM_FLAG_SPOR_LATE;
  } else {
    /* the task is periodic, release/queue another instance */
    RM_intern_release(p, lev);
  }
}
 
/* This timer event handler is called when a task misses its deadline */
static void RM_timer_deadline(void *par)
{
  PID p = (PID) par;
  RM_level_des *lev = (RM_level_des *)level_table[proc_table[p].task_level];
  RM_task_des *td = &lev->tvec[p];

  /* the timer fired: its handle is no longer pending */
  td->dl_timer = -1;

  TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,
                  (unsigned short int)proc_table[p].context, 0);

  /* either abort the task or just account for the miss, depending
     on the level configuration */
  if (lev->flags & RM_ENABLE_DL_EXCEPTION)
    kern_raise(XDEADLINE_MISS, p);
  else
    td->dl_miss++;
}
 
/* This timer event handler is called after waiting for an offset */
static void RM_timer_offset(void *par)
{
  PID p = (PID) par;
  RM_level_des *lev = (RM_level_des *)level_table[proc_table[p].task_level];

  /* the one-shot offset timer has expired */
  lev->tvec[p].off_timer = -1;

  /* release the task now */
  RM_intern_release(p, lev);
}
 
/* This function is called when a guest task misses its deadline */
static void RM_timer_guest_deadline(void *par)
{
  PID p = (PID) par;
  RM_level_des *lev = (RM_level_des *)level_table[proc_table[p].task_level];

  /* the timer fired: its handle is no longer pending */
  lev->tvec[p].dl_timer = -1;

  TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,
                  (unsigned short int)proc_table[p].context, 0);

  /* a guest task missing its deadline always raises an exception */
  kern_raise(XDEADLINE_MISS, p);
}
 
 
/**** Internal utility functions ****/
 
/* Release (or queue) a task, post deadline and endperiod timers.
   If the task is idle it is inserted in the ready queue with priority
   equal to its relative deadline; otherwise the activation is queued
   in nact.  In both cases the release time is advanced by one period
   and the end-of-period timer is (re)posted. */
static void RM_intern_release(PID p, RM_level_des *lev)
{
  struct timespec temp;
  RM_task_des *td = &lev->tvec[p];

  /* post deadline timer (absolute time: release + relative deadline) */
  if (lev->flags & RM_ENABLE_DL_CHECK) {
    temp = td->release;
    ADDUSEC2TIMESPEC(td->rdeadline, &temp);
    /* cancel a still-pending deadline timer before reposting */
    if (td->dl_timer != -1) {
      kern_event_delete(td->dl_timer);
      td->dl_timer = -1;
    }
    td->dl_timer = kern_event_post(&temp,RM_timer_deadline,(void *)p);
  }

  /* release or queue next job */
  if (proc_table[p].status == RM_IDLE) {
    /* assign deadline, insert task in the ready queue */
    proc_table[p].status = RM_READY;
    *iq_query_priority(p,&lev->ready) = td->rdeadline;
    iq_priority_insert(p,&lev->ready);
#ifdef RM_DEBUG
    rm_printf("At %s: releasing %s with deadline %s\n", pnow(),
              proc_table[p].name, ptime1(&td->adeadline));
#endif
    /* increase assigned deadline */
    ADDUSEC2TIMESPEC(td->period, &td->adeadline);
    /* reschedule */
    event_need_reschedule();
  } else {
    /* the task is still busy: queue the activation */
    td->nact++;
  }

  /* increase release time */
  ADDUSEC2TIMESPEC(td->period, &td->release);
  /* post end of period timer (cancel a pending one first) */
  if (td->eop_timer != -1) {
    kern_event_delete(td->eop_timer);
    td->eop_timer = -1;
  }
  td->eop_timer = kern_event_post(&td->release, RM_timer_endperiod,(void *)p);

  TRACER_LOGEVENT(FTrace_EVT_task_timer,
                  (unsigned short int)proc_table[p].context,
                  (unsigned int)proc_table[p].task_level);
}
 
 
/**** Public generic kernel interface functions ****/
 
/* Generic-kernel scheduler hook: return the head of the ready queue,
   i.e. the ready task with the smallest relative deadline. */
static PID RM_public_scheduler(LEVEL l)
{
  RM_level_des *level = (RM_level_des *)(level_table[l]);

  return iq_query_first(&level->ready);
}
 
/* Checks and decreases the available system bandwidth.
   Returns 1 (and subtracts lev->U) if the level fits in the remaining
   bandwidth, 0 otherwise. */
static int RM_public_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  RM_level_des *level = (RM_level_des *)(level_table[l]);

  if (*freebandwidth < level->U)
    return 0;

  *freebandwidth -= level->U;
  return 1;
}
 
/* Called by task_create: Checks task model and creates a task */
/*
 * Accepts only HARD_TASK_MODELs with wcet != 0, mit != 0 and drel <= mit
 * (drel == 0 is interpreted as drel = mit).  Returns 0 on success, -1 if
 * the model is rejected or the bandwidth guarantee fails.
 *
 * Review fixes:
 *  - the D == T adjustment of rdeadline is now done BEFORE the bandwidth
 *    guarantee, so the utilization added here is exactly what
 *    RM_public_detach()/RM_public_end() later reclaim with the adjusted
 *    rdeadline (previously lev->U drifted on every create/detach cycle);
 *  - proc_table[p].wcet/avail_time are always initialized, because the
 *    bandwidth reclaiming in detach/end reads proc_table[p].wcet even
 *    when RM_ENABLE_WCET_CHECK is off.
 */
static int RM_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  RM_task_des *td = &lev->tvec[p];
  HARD_TASK_MODEL *h;

  if (m->pclass != HARD_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;
  h = (HARD_TASK_MODEL *)m;
  if (!h->wcet || !h->mit) return -1;
  if (h->drel > h->mit) return -1;  /* only D <= T supported */

  if (!h->drel) {
    td->rdeadline = h->mit;
  } else {
    td->rdeadline = h->drel;
  }

  td->period = h->mit;
  if (td->rdeadline == td->period) {
    /* Ensure that D <= T-eps to make dl_timer trigger before eop_timer */
    td->rdeadline = td->period - 1;
  }

  /* check the free bandwidth, using the FINAL rdeadline so that the
     amount charged here matches what detach/end will give back... */
  if (lev->flags & RM_ENABLE_GUARANTEE) {
    bandwidth_t b;
    b = (MAX_BANDWIDTH / td->rdeadline) * h->wcet;

    /* really update lev->U, checking an overflow... */
    if (MAX_BANDWIDTH - lev->U > b) {
      lev->U += b;
    } else {
      return -1;
    }
  }

  td->flags = 0;
  if (h->periodicity == APERIODIC) {
    td->flags |= RM_FLAG_SPORADIC;
  }
  td->offset = h->offset;
  td->dl_timer = -1;
  td->eop_timer = -1;
  td->off_timer = -1;
  td->dl_miss = 0;
  td->wcet_miss = 0;
  td->act_miss = 0;
  td->nact = 0;

  /* always record the declared wcet: the bandwidth reclaiming in
     detach/end reads proc_table[p].wcet regardless of the wcet check */
  proc_table[p].avail_time = h->wcet;
  proc_table[p].wcet = h->wcet;

  /* Enable wcet check (budget enforcement by the generic kernel) */
  if (lev->flags & RM_ENABLE_WCET_CHECK) {
    proc_table[p].control |= CONTROL_CAP; /* turn on measurement */
  }

  return 0;
}
 
/* Reclaim the bandwidth used by the task */
static void RM_public_detach(LEVEL l, PID p)
{
  RM_level_des *level = (RM_level_des *)(level_table[l]);
  RM_task_des *task = &level->tvec[p];

  if (!(level->flags & RM_ENABLE_GUARANTEE))
    return;

  /* give back the utilization that was charged at creation time */
  level->U -= (MAX_BANDWIDTH / task->rdeadline) * proc_table[p].wcet;
}
 
/* The scheduler has chosen p: take it out of the ready queue while it
   runs (the task state itself is handled by the generic kernel). */
static void RM_public_dispatch(LEVEL l, PID p, int nostop)
{
  RM_level_des *level = (RM_level_des *)(level_table[l]);

  iq_extract(p, &level->ready);
}
 
/* Called when the task is preempted or its budget runs out.
   If wcet checking is enabled and the budget is exhausted, either raise
   XWCET_VIOLATION or stop capping and count the overrun; in any case the
   task is put back into the ready queue. */
static void RM_public_epilogue(LEVEL l, PID p)
{
  RM_level_des *level = (RM_level_des *)(level_table[l]);
  RM_task_des *task = &level->tvec[p];

  /* budget exhausted? (only meaningful when the wcet check is active) */
  if ((level->flags & RM_ENABLE_WCET_CHECK) &&
      proc_table[p].avail_time <= 0) {
    TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,
                    (unsigned short int)proc_table[p].context,0);
    if (level->flags & RM_ENABLE_WCET_EXCEPTION)
      kern_raise(XWCET_VIOLATION,p);
    else {
      /* disable capping for this job and account the overrun */
      proc_table[p].control &= ~CONTROL_CAP;
      task->wcet_miss++;
    }
  }

  /* the preempted (or overrun) task returns to the ready queue */
  iq_priority_insert(p,&level->ready);
  proc_table[p].status = RM_READY;
}
 
/* Called by task_activate or group_activate: Activates the task at time t */
/*
 * If the task is not SLEEPing, the activation is either an error
 * (XACTIVATION when RM_ENABLE_ACT_EXCEPTION is set) or is skipped with the
 * act_miss counter increased.  Otherwise the release time is set to
 * t + offset and the job is released immediately, or deferred through a
 * one-shot offset timer if the release time is still in the future.
 */
static void RM_public_activate(LEVEL l, PID p, struct timespec *t)
{
  struct timespec clocktime;
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  RM_task_des *td = &lev->tvec[p];

  kern_gettime(&clocktime);

  /* check if we are not in the SLEEP state */
  if (proc_table[p].status != SLEEP) {
    if (lev->flags & RM_ENABLE_ACT_EXCEPTION) {
      /* too frequent or wrongful activation: raise exception */
      kern_raise(XACTIVATION,p);
    } else {
      /* skip the sporadic job, but increase a counter */
#ifdef RM_DEBUG
      rm_printf("At %s: activation of %s skipped\n", pnow(),
                proc_table[p].name);
#endif
      td->act_miss++;
    }
    return;
  }

  /* set the release time to the activation time + offset */
  td->release = *t;
  ADDUSEC2TIMESPEC(td->offset, &td->release);

  /* set the absolute deadline to the activation time + offset + rdeadline */
  td->adeadline = td->release;
  ADDUSEC2TIMESPEC(td->rdeadline, &td->adeadline);

  /* Check if release > clocktime. If yes, release it later,
     otherwise release it now. */

  proc_table[p].status = RM_IDLE;

  if (TIMESPEC_A_GT_B(&td->release, &clocktime)) {
    /* release later, post an offset timer (replacing a stale one) */
    if (td->off_timer != -1) {
      kern_event_delete(td->off_timer);
      td->off_timer = -1;
    }
    td->off_timer = kern_event_post(&td->release,RM_timer_offset,(void *)p);
  } else {
    /* release now */
    RM_intern_release(p, lev);
  }
}
 
/* A previously blocked task becomes runnable again: put it back into
   the ready queue at its (static) priority position. */
static void RM_public_unblock(LEVEL l, PID p)
{
  RM_level_des *level = (RM_level_des *)(level_table[l]);

  iq_priority_insert(p, &level->ready);
  proc_table[p].status = RM_READY;
}
 
/* Called when a task blocks on a synchronization primitive. */
static void RM_public_block(LEVEL l, PID p)
{
  /* Nothing to do here:
     - the task was already removed from the ready queue at dispatch time;
     - the capacity event is removed by the generic kernel;
     - the wcet accounting needs no change;
     - the task state is set by the caller;
     - the posted deadline timer must keep running. */
}
 
/* Called by task_endcycle or task_sleep: Ends the current instance */
/*
 * Message protocol (integer cast of m):
 *   0 -> task_endcycle(): finish the current job.  If activations are
 *        pending (td->nact > 0) the next job is released immediately with
 *        the task's static priority; otherwise the task parks in RM_IDLE
 *        (periodic) or RM_WAIT / SLEEP (sporadic, depending on lateness).
 *   1 -> task_sleep(): finish the job AND discard any pending
 *        activations, moving the task towards RM_WAIT or SLEEP.
 * In both cases the wcet budget is recharged and, if wcet checking is on,
 * the capacity control is re-enabled.  Always returns 0.
 */
static int RM_public_message(LEVEL l, PID p, void *m)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  RM_task_des *td = &lev->tvec[p];

  switch((long)(m)) {
    /* task_endcycle() */
    case 0:
      /* if there are no pending jobs */
      if (td->nact == 0) {
        /* remove deadline timer, if any */
        if (td->dl_timer != -1) {
          kern_event_delete(td->dl_timer);
          td->dl_timer = -1;
        }
        if (td->flags & RM_FLAG_SPORADIC) {
          /* sporadic task */
          if (!(td->flags & RM_FLAG_SPOR_LATE)) {
            /* on time: wait until the minimum interarrival time elapses */
            proc_table[p].status = RM_WAIT;
          } else {
            /* it's late, move it directly to SLEEP */
            proc_table[p].status = SLEEP;
            td->flags &= ~RM_FLAG_SPOR_LATE;
          }
        } else {
          /* periodic task: the endperiod timer will release the next job */
          proc_table[p].status = RM_IDLE;
        }
      } else {
        /* we are late / there are pending jobs */
        td->nact--;
        /* compute and assign absolute deadline */
        *iq_query_priority(p,&lev->ready) = td->rdeadline;
        iq_priority_insert(p,&lev->ready);
        /* increase assigned deadline */
        ADDUSEC2TIMESPEC(td->period, &td->adeadline);
#ifdef RM_DEBUG
        rm_printf("(Late) At %s: releasing %s with deadline %s\n",
                  pnow(),proc_table[p].name,ptime1(&td->adeadline));
#endif
      }
      break;

    /* task_sleep() */
    case 1:
      /* remove deadline timer, if any */
      if (td->dl_timer != -1) {
        kern_event_delete(td->dl_timer);
        td->dl_timer = -1;
      }
      if (td->flags & RM_FLAG_SPORADIC) {
        /* sporadic task */
        if (!(td->flags & RM_FLAG_SPOR_LATE)) {
          proc_table[p].status = RM_WAIT;
        } else {
          /* it's late, move it directly to SLEEP */
          proc_table[p].status = SLEEP;
          td->flags &= ~RM_FLAG_SPOR_LATE;
        }
      } else {
        /* periodic task */
        if (!(td->nact > 0)) {
          /* we are on time. go to the RM_WAIT state */
          proc_table[p].status = RM_WAIT;
        } else {
          /* we are late. delete pending activations and go to SLEEP */
          td->nact = 0;
          proc_table[p].status = SLEEP;
          /* remove end of period timer */
          if (td->eop_timer != -1) {
            kern_event_delete(td->eop_timer);
            td->eop_timer = -1;
          }
        }
      }
      break;
  }

  if (lev->flags & RM_ENABLE_WCET_CHECK) {
    proc_table[p].control |= CONTROL_CAP;
  }
  jet_update_endcycle(); /* Update the Jet data... */
  /* recharge the execution-time budget for the next job */
  proc_table[p].avail_time = proc_table[p].wcet;
  TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,
                  (unsigned short int)proc_table[p].context,(unsigned int)l);
  return 0;
}
 
/* End the task and free the resources at the end of the period */
/*
 * A task that still has a pending end-of-period timer becomes a ZOMBIE so
 * that its descriptor survives until that timer fires (the endperiod
 * handler — defined elsewhere in this file — presumably frees it and
 * reclaims the bandwidth; TODO confirm).  A "late" sporadic task has no
 * pending endperiod timer, so it is freed and its bandwidth reclaimed
 * right here.
 */
static void RM_public_end(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  RM_task_des *td = &lev->tvec[p];

  if (!(td->flags & RM_FLAG_SPOR_LATE)) {
    /* remove the deadline timer (if any) */
    if (td->dl_timer != -1) {
      kern_event_delete(td->dl_timer);
      td->dl_timer = -1;
    }
    proc_table[p].status = RM_ZOMBIE;
  } else {
    /* no endperiod timer will be fired, free the task now! */
    proc_table[p].status = FREE;
    iq_insertfirst(p,&freedesc);
    /* free the allocated bandwidth */
    /* NOTE(review): this runs even when RM_ENABLE_GUARANTEE is off, in
       which case lev->U is 0 and would underflow — harmless only if U is
       never read with the guarantee disabled; verify */
    lev->U -= (MAX_BANDWIDTH/td->rdeadline) * proc_table[p].wcet;
  }
}
 
/**** Private generic kernel interface functions (guest calls) ****/
 
/* Insert a guest task */
/*
 * Guest tasks are injected by another scheduling module through a
 * JOB_TASK_MODEL (absolute deadline + period).  The job goes straight
 * into the ready queue; unless job->noraiseexc is set, a one-shot timer
 * raising XDEADLINE_MISS is posted at the job's absolute deadline.
 * No bandwidth guarantee is performed on guest tasks.
 */
static void RM_private_insert(LEVEL l, PID p, TASK_MODEL *m)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  RM_task_des *td = &lev->tvec[p];
  JOB_TASK_MODEL *job;

  /* only JOB models targeted at this level are accepted */
  if (m->pclass != JOB_PCLASS || (m->level != 0 && m->level != l) ) {
    kern_raise(XINVALID_TASK, p);
    return;
  }

  job = (JOB_TASK_MODEL *)m;

  /* Insert task in the correct position */
  *iq_query_timespec(p, &lev->ready) = job->deadline;
  /* the guest's queue priority is its period, not a relative deadline as
     for native tasks — the original author flagged this too: */
  *iq_query_priority(p, &lev->ready) = job->period;
  /* THIS IS QUESTIONABLE!! relative deadline? */
  iq_priority_insert(p,&lev->ready);
  proc_table[p].status = RM_READY;
  td->period = job->period;

  /* cancel a stale deadline timer left over from a previous job */
  if (td->dl_timer != -1) {
    kern_event_delete(td->dl_timer);
    td->dl_timer = -1;
  }

  if (!job->noraiseexc) {
    td->dl_timer = kern_event_post(iq_query_timespec(p, &lev->ready),
                                   RM_timer_guest_deadline,(void *)p);
  }
}
 
/* Dispatch a guest task.
   The scheduler has already set the task state to EXE; here we only take
   the task out of the ready queue.  Note that p is not necessarily the
   head of the queue! */
static void RM_private_dispatch(LEVEL l, PID p, int nostop)
{
  RM_level_des *level = (RM_level_des *)(level_table[l]);

  iq_extract(p, &level->ready);
}
 
/* A guest task was preempted (or ran out of budget): it simply goes
   back into the ready queue. */
static void RM_private_epilogue(LEVEL l, PID p)
{
  RM_level_des *level = (RM_level_des *)(level_table[l]);

  proc_table[p].status = RM_READY;
  iq_priority_insert(p, &level->ready);
}
 
/* Extract a guest task.
   The job leaves this level: remove it from the ready queue (it may not
   be queued if it was running) and cancel its guest deadline timer. */
static void RM_private_extract(LEVEL l, PID p)
{
  RM_level_des *level = (RM_level_des *)(level_table[l]);
  RM_task_des *task = &level->tvec[p];

  if (proc_table[p].status == RM_READY)
    iq_extract(p, &level->ready);

  /* the guest job is gone: its pending deadline timer must not fire */
  if (task->dl_timer != -1) {
    kern_event_delete(task->dl_timer);
    task->dl_timer = -1;
  }
}
 
 
/**** Level registration function ****/
 
/*
 * Allocates and initializes an RM level descriptor and registers it with
 * the generic kernel.  `flags` is an OR of the RM_ENABLE_* constants
 * declared in rm.h; the *_EXCEPTION flags imply the corresponding
 * *_CHECK flags.  Returns the index of the new level.
 */
LEVEL RM_register_level(int flags)
{
  LEVEL l;            /* the level that we register */
  RM_level_des *lev;  /* for readableness only */
  int i;

  printk("RM_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor(sizeof(RM_level_des));

  lev = (RM_level_des *)level_table[l];

  /* fill the standard descriptor: guest-task hooks... */
  lev->l.private_insert   = RM_private_insert;
  lev->l.private_extract  = RM_private_extract;
  lev->l.private_dispatch = RM_private_dispatch;
  lev->l.private_epilogue = RM_private_epilogue;

  /* ...and public hooks */
  lev->l.public_scheduler = RM_public_scheduler;
  if (flags & RM_ENABLE_GUARANTEE)
    lev->l.public_guarantee = RM_public_guarantee;
  else
    lev->l.public_guarantee = NULL;

  lev->l.public_create    = RM_public_create;
  lev->l.public_detach    = RM_public_detach;
  lev->l.public_end       = RM_public_end;
  lev->l.public_dispatch  = RM_public_dispatch;
  lev->l.public_epilogue  = RM_public_epilogue;
  lev->l.public_activate  = RM_public_activate;
  lev->l.public_unblock   = RM_public_unblock;
  lev->l.public_block     = RM_public_block;
  lev->l.public_message   = RM_public_message;

  iq_init(&lev->ready, &freedesc, 0);

  lev->flags = flags;
  /* an overrun exception only makes sense if the overrun is detected */
  if (lev->flags & RM_ENABLE_WCET_EXCEPTION) {
    lev->flags |= RM_ENABLE_WCET_CHECK;
  }
  if (lev->flags & RM_ENABLE_DL_EXCEPTION) {
    lev->flags |= RM_ENABLE_DL_CHECK;
  }

  /* no bandwidth allocated yet */
  lev->U = 0;

  /* reset every per-task descriptor: no timers pending, no misses */
  for (i=0;i<MAX_PROC;i++) {
    RM_task_des *td = &lev->tvec[i];
    td->flags = 0;
    td->dl_timer = -1;
    td->eop_timer = -1;
    td->off_timer = -1;
    td->dl_miss = 0;
    td->wcet_miss = 0;
    td->act_miss = 0;
    td->nact = 0;
  }

  return l;
}
 
 
/**** Public utility functions ****/
 
/* Return the bandwidth currently allocated by this level. */
bandwidth_t RM_usedbandwidth(LEVEL l)
{
  return ((RM_level_des *)(level_table[l]))->U;
}
 
/* Return the number of deadlines missed by task p so far. */
int RM_get_dl_miss(PID p)
{
  RM_level_des *level =
    (RM_level_des *)(level_table[proc_table[p].task_level]);

  return level->tvec[p].dl_miss;
}
 
/* Return the number of execution-time (wcet) overruns of task p. */
int RM_get_wcet_miss(PID p)
{
  RM_level_des *level =
    (RM_level_des *)(level_table[proc_table[p].task_level]);

  return level->tvec[p].wcet_miss;
}
 
/* Return the number of skipped activations of task p. */
int RM_get_act_miss(PID p)
{
  RM_level_des *level =
    (RM_level_des *)(level_table[proc_table[p].task_level]);

  return level->tvec[p].act_miss;
}
 
/* Return the number of activations currently queued for task p. */
int RM_get_nact(PID p)
{
  RM_level_des *level =
    (RM_level_des *)(level_table[proc_table[p].task_level]);

  return level->tvec[p].nact;
}
 
/shark/trunk/modules/rm/subdir.mk
0,0 → 1,0
OBJS += rm/rm.o
/shark/trunk/modules/rm/rm/rm.h
0,0 → 1,184
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* Anton Cervin
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: rm.h,v 1.1 2005-02-25 10:55:09 pj Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2005-02-25 10:55:09 $
------------
 
This file contains the scheduling module RM (rate-/deadline-monotonic)
 
Title:
RM (rate-/deadline-monotonic)
 
Task Models Accepted:
HARD_TASK_MODEL - Hard Tasks (Periodic and Sporadic)
wcet field and mit field must be != 0. They are used to set the wcet
and period of the tasks.
periodicity field can be either PERIODIC or APERIODIC
drel field must be <= mit. NOTE 1: a drel of 0 is interpreted as mit.
NOTE 2: The utilization of the task is computed as wcet/drel.
offset field specifies a release offset relative to task_activate or
group_activate.
 
Guest Models Accepted:
JOB_TASK_MODEL - a single guest task activation
Identified by an absolute deadline and a period.
period field is ignored
 
Description:
This module schedules periodic and sporadic tasks based on their
relative deadlines. The task guarantee is based on a simple
utilization approach. The utilization factor of a task is computed
as wcet/drel. (By default, drel = mit.) A periodic task must only
be activated once; subsequent activations are triggered by an
internal timer. By contrast, a sporadic task must be explicitly
activated for each instance. NO GUARANTEE is performed on guest
tasks. The guarantee must be performed by the level that inserts
guest tasks in the RM level.
 
Exceptions raised:
XUNVALID_GUEST
This level doesn't support guests of this type. When a guest
operation is called, the exception is raised.
 
The following exceptions may be raised by the module:
XDEADLINE_MISS
If a task misses its deadline and the RM_ENABLE_DL_EXCEPTION
flag is set, this exception is raised.
 
XWCET_VIOLATION
If a task executes longer than its declared wcet and the
RM_ENABLE_WCET_EXCEPTION flag is set, this exception is raised.
 
XACTIVATION
If a sporadic task is activated more often than its declared mit
and the RM_ENABLE_ACT_EXCEPTION flag is set, this exception is
raised. This exception is also raised if a periodic task is
activated while not in the SLEEP state.
 
Restrictions & special features:
 
- Relative deadlines drel <= mit may be specified.
- An offset > 0 will delay the activation of the task by the same
amount of time. To synchronize a group of tasks, assign suitable
offsets and then use the group_activate function.
- This level doesn't manage the main task.
- The level uses the priority and timespec_priority fields.
- The guest tasks don't provide the guest_endcycle function.
- At init time, the user can specify the behavior in case of
deadline and wcet overruns. The following flags are available:
 
(No flags enabled) - Deadline and wcet overruns are ignored.
Pending periodic jobs are queued and are
eventually scheduled with correct deadlines
according to their original arrival times.
Sporadic tasks that arrive too often are
simply dropped.
RM_ENABLE_DL_CHECK - When a deadline overrun occurs, the
dl_miss counter of the task is increased.
Same behavior for pending jobs as above.
RM_ENABLE_WCET_CHECK - When a wcet overrun occurs, the
wcet_miss counter of the task is increased.
Same behavior for pending jobs as above.
RM_ENABLE_DL_EXCEPTION - When a deadline overrun occurs, an
exception is raised.
RM_ENABLE_WCET_EXCEPTION - When a wcet overrun occurs, an
exception is raised.
RM_ENABLE_ACT_EXCEPTION When a periodic or sporadic task is activated
too often, an exception is raised.
 
- The functions RM_get_dl_miss, RM_get_wcet_miss, RM_get_act_miss,
and RM_get_nact can be used to find out the number of missed
deadlines, the number of wcet overruns, the number of skipped
activations, and the number of currently queued periodic activations.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#ifndef __RM_H__
#define __RM_H__
 
#include <ll/ll.h>
#include <kernel/config.h>
#include <sys/types.h>
#include <kernel/types.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/* Level flags */
#define RM_DISABLE_ALL 0
#define RM_ENABLE_GUARANTEE 1 /* Task guarantee enabled */
#define RM_ENABLE_WCET_CHECK 2 /* Wcet monitoring enabled */
#define RM_ENABLE_DL_CHECK 4 /* Deadline monitoring enabled */
#define RM_ENABLE_WCET_EXCEPTION 8 /* Wcet overrun exception enabled */
#define RM_ENABLE_DL_EXCEPTION 16 /* Deadline overrun exception enabled */
#define RM_ENABLE_ACT_EXCEPTION 32 /* Activation exception enabled */
#define RM_ENABLE_ALL 63 /* All flags enabled */
 
/* Registration function */
LEVEL RM_register_level(int flags);
 
 
/**** Public utility functions ****/
 
/* Get the bandwidth used by the level */
bandwidth_t RM_usedbandwidth(LEVEL l);
 
/* Get the number of missed deadlines for a task */
int RM_get_dl_miss(PID p);
 
/* Get the number of execution overruns for a task */
int RM_get_wcet_miss(PID p);
 
/* Get the number of skipped activations for a task */
int RM_get_act_miss(PID p);
 
/* Get the current number of queued activations for a task */
int RM_get_nact(PID p);
 
 
__END_DECLS
#endif