Subversion Repositories shark

Compare Revisions

Ignore whitespace Rev 34 → Rev 35

/shark/tags/rel_0_2/kernel/int_sem.c
0,0 → 1,178
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: int_sem.c,v 1.2 2002-11-11 08:34:08 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:34:08 $
------------
 
Internal semaphores.
 
They are different from the Posix semaphores and the mutexes because:
- internal_sem_wait is not a cancellation point
- there are no limits on the semaphores that can be created
(they work like a mutex_t...)
- the queuing policy is FIFO
- Be Careful!
they are made to be fast... so not so many controls are done!!!
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <kernel/int_sem.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/* Wait status for this library */
#define INTERNAL_SEM_WAIT LIB_STATUS_BASE
 
 
/* Initialize the internal semaphore S: set its counter to VALUE and
   prepare an empty FIFO queue for the tasks that will block on it. */
void internal_sem_init(internal_sem_t *s, int value)
{
  iq_init(&s->blocked, &freedesc, 0);
  s->count = value;
}
 
/* Decrement the counter of S; if it is already 0, block the calling
   task in FIFO order until internal_sem_post() wakes it up.
   This is NOT a cancellation point. Interrupts are disabled across
   the whole operation via kern_fsave()/kern_frestore(). */
void internal_sem_wait(internal_sem_t *s)
{
SYS_FLAGS f;
 
//kern_cli();
f = kern_fsave();
 
/* fast path: a unit is available, just consume it */
if (s->count) {
s->count--;
//kern_sti();
kern_frestore(f);
return;
}
else { /* We must block exec task */
LEVEL l; /* for readableness only */
TIME tx; /* a dummy TIME for timespec operations */
struct timespec ty; /* a dummy timespec for timespec operations */
 
proc_table[exec_shadow].context = kern_context_save();
/* SAME AS SCHEDULER... manage the capacity event and the load_info */
/* account the CPU time consumed since the last scheduling point */
ll_gettime(TIME_EXACT, &schedule_time);
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
tx = TIMESPEC2USEC(&ty);
proc_table[exec_shadow].avail_time -= tx;
jet_update_slice(tx);
/* the task leaves the CPU: its capacity event is no longer needed */
if (cap_timer != NIL) {
event_delete(cap_timer);
cap_timer = NIL;
}
l = proc_table[exec_shadow].task_level;
level_table[l]->task_extract(l,exec_shadow);
 
/* we insert the task in the semaphore queue */
proc_table[exec_shadow].status = INTERNAL_SEM_WAIT;
iq_insertlast(exec_shadow,&s->blocked);
 
/* and finally we reschedule */
exec = exec_shadow = -1;
scheduler();
 
/* here the task has been woken up by internal_sem_post() */
ll_context_to(proc_table[exec_shadow].context);
kern_after_dispatch();
kern_frestore(f);
}
}
 
/* return 0 if the counter is decremented, -1 if not */
/* Non-blocking wait on S: try to take one unit of the counter.
   Returns 0 if the counter was decremented, -1 if it was already 0. */
int internal_sem_trywait(internal_sem_t *s)
{
  int taken = -1;
  SYS_FLAGS f;

  f = kern_fsave();
  if (s->count != 0) {
    s->count--;
    taken = 0;
  }
  kern_frestore(f);

  return taken;
}
 
 
/* Release one unit of S: if some task is blocked, wake up the first
   one in FIFO order (the counter is not incremented in that case, the
   unit is handed directly to the woken task); otherwise increment the
   counter. Must be called from task context: it performs a context
   save/load and may preempt the caller via scheduler(). */
void internal_sem_post(internal_sem_t *s)
{
proc_table[exec_shadow].context = kern_context_save();
 
if (s->blocked.first != -1) {
register PID p;
register LEVEL l;
 
/* hand the unit to the first blocked task and reinsert it into
   its scheduling level */
p = iq_getfirst(&s->blocked);
l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);
 
scheduler();
}
else
s->count++;
 
kern_context_load(proc_table[exec_shadow].context);
}
 
/* Return the current counter of S, or -1 if at least one task is
   blocked on the semaphore. */
int internal_sem_getvalue(internal_sem_t *s)
{
  int value;
  SYS_FLAGS f;

  f = kern_fsave();
  value = (s->blocked.first == -1) ? s->count : -1;
  kern_frestore(f);

  return value;
}
/shark/tags/rel_0_2/kernel/activate.c
0,0 → 1,208
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: activate.c,v 1.2 2002-10-28 07:58:19 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:58:19 $
------------
 
task_activate & group_activate
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
#include <stdarg.h>
#include <ll/ll.h>
#include <ll/stdlib.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/config.h>
#include <kernel/model.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/types.h>
#include <kernel/descr.h>
#include <errno.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <kernel/trace.h>
 
/*+
Activates a single task
+*/
/*+
 Activates a single task.
 Returns 0 on success, -1 with errno = EINVALID_TASK_ID if p is out of
 range or refers to a free descriptor.
 Three execution contexts are handled: runlevel functions, interrupt
 handlers, and normal task context. In all of them a frozen task
 (FREEZE_ACTIVATION set, see task_block_activation()) only gets its
 pending-activation counter incremented.
+*/
int task_activate(PID p)
{
LEVEL l; /* the level of the task p */
 
/* some controls on the task p */
if (p<0 || p>=MAX_PROC) {
errno = EINVALID_TASK_ID;
return -1;
}
if (proc_table[p].status == FREE) {
errno = EINVALID_TASK_ID;
return -1;
}
 
/*+ if we are calling the runlevel functions the system is
into the global_context... we only have to call
the task_activate of the level +*/
if (calling_runlevel_func) {
SYS_FLAGS f;
f=kern_fsave();
if (proc_table[p].control & FREEZE_ACTIVATION)
proc_table[p].frozen_activations++;
else {
l = proc_table[p].task_level;
level_table[l]->task_activate(l,p);
}
kern_frestore(f);
return 0;
}
 
 
/* Begin activate */
/* interrupt context: no context switch is possible here, so activate
   the task and request a reschedule at the end of the handler */
if (ll_ActiveInt()) {
SYS_FLAGS f;
f = kern_fsave();
if (proc_table[p].control & FREEZE_ACTIVATION)
proc_table[p].frozen_activations++;
else {
l = proc_table[p].task_level;
level_table[l]->task_activate(l,p);
event_need_reschedule();
}
kern_frestore(f);
}
else {
/* normal task context: save the context, activate, then preempt
   if the scheduler decides so */
proc_table[exec_shadow].context = kern_context_save();
 
if (proc_table[p].control & FREEZE_ACTIVATION)
proc_table[p].frozen_activations++;
else {
/* tracer stuff */
/* NOTE(review): TRC_ACTIVATE is logged only in this branch, not in
   the runlevel/interrupt branches above, while group_activate()
   traces in its runlevel branch too — confirm the asymmetry is
   intended */
trc_logevent(TRC_ACTIVATE,&p);
l = proc_table[p].task_level;
level_table[l]->task_activate(l,p);
/* Preempt if necessary */
scheduler();
}
kern_context_load(proc_table[exec_shadow].context);
}
 
return 0;
}
 
 
/*+
Activate a group of tasks, identified by the group g
It returns -1 if the group is not valid
+*/
/*+
 Activate a group of tasks, identified by the group g.
 Returns -1 with errno = EINVALID_GROUP if g is 0 (reserved value),
 0 otherwise. The same three execution contexts as task_activate()
 are handled; frozen tasks only get their pending-activation counter
 incremented.
+*/
int group_activate(WORD g)
{
PID i; /* a counter */
register LEVEL l; /* a level value */
 
/* group 0 is not a valid group */
if (g == 0) {
errno = EINVALID_GROUP;
return -1;
}
 
/*+ if we are calling the runlevel functions the system is
into the global_context... we only have to call
the task_activate of the level +*/
if (calling_runlevel_func) {
SYS_FLAGS f;
f=kern_fsave();
 
for (i=0 ; i<MAX_PROC; i++)
if (proc_table[i].group == g) {
if (proc_table[i].control & FREEZE_ACTIVATION) {
proc_table[i].frozen_activations++;
continue;
}
/* tracer stuff */
trc_logevent(TRC_ACTIVATE,&i);
l = proc_table[i].task_level;
level_table[l]->task_activate(l,i);
}
 
kern_frestore(f);
return 0;
}
 
/* interrupt context: activate every member of the group, then ask
   for a reschedule at the end of the interrupt handler */
if (ll_ActiveInt()) {
SYS_FLAGS f;
f=kern_fsave();
for (i=0 ; i<MAX_PROC; i++)
if (proc_table[i].group == g) {
if (proc_table[i].control & FREEZE_ACTIVATION) {
proc_table[i].frozen_activations++;
continue;
}
/* tracer stuff */
trc_logevent(TRC_ACTIVATE,&i);
l = proc_table[i].task_level;
level_table[l]->task_activate(l,i);
}
event_need_reschedule();
kern_frestore(f);
}
else {
/* normal task context: activate every member, then run the
   scheduler once for the whole group */
proc_table[exec_shadow].context = kern_context_save();
for (i=0 ; i<MAX_PROC; i++)
if (proc_table[i].group == g) {
if (proc_table[i].control & FREEZE_ACTIVATION) {
proc_table[i].frozen_activations++;
continue;
}
l = proc_table[i].task_level;
level_table[l]->task_activate(l,i);
/* tracer stuff */
trc_logevent(TRC_ACTIVATE,&i);
}
scheduler();
kern_context_load(proc_table[exec_shadow].context);
}
return 0;
}
/shark/tags/rel_0_2/kernel/blkact.c
0,0 → 1,130
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
 
/**
------------
CVS : $Id: blkact.c,v 1.2 2002-10-28 07:58:19 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:58:19 $
------------
 
block_activations & co.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
#include <stdarg.h>
#include <ll/ll.h>
#include <ll/stdlib.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/config.h>
#include <kernel/model.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/types.h>
#include <kernel/descr.h>
#include <errno.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/*+
It blocks all explicit activations of a task made with task_activate and
group_activate. These activations are registered in an internal counter,
returned by task_unblock_activation.
It returns 0 if all ok, or -1 otherwise. errno is set accordingly.
+*/
/* Freeze the explicit activations of task p: from now on
   task_activate()/group_activate() only count them instead of
   delivering them. Returns 0 on success, -1 with errno set. */
int task_block_activation(PID p)
{
  SYS_FLAGS f;

  /* validate the task identifier (short-circuit protects the
     proc_table access) */
  if (p < 0 || p >= MAX_PROC || proc_table[p].status == FREE) {
    errno = EINVALID_TASK_ID;
    return -1;
  }

  f = kern_fsave();
  if (!(proc_table[p].control & FREEZE_ACTIVATION)) {
    proc_table[p].frozen_activations = 0;
    proc_table[p].control |= FREEZE_ACTIVATION;
  }
  kern_frestore(f);

  return 0;
}
 
/*+
It unblocks all explicit activations of a task, and returns the number of
"frozen" activations. It does NOT call task_activate!
It returns -1 if an error occurs. errno is set accordingly.
+*/
/* Unfreeze the explicit activations of task p and return how many
   activations arrived while it was frozen. The pending activations
   are NOT delivered: the caller decides what to do with the count.
   Returns -1 with errno set on error. */
int task_unblock_activation(PID p)
{
  int frozen = 0;
  SYS_FLAGS f;

  /* validate the task identifier (short-circuit protects the
     proc_table access) */
  if (p < 0 || p >= MAX_PROC || proc_table[p].status == FREE) {
    errno = EINVALID_TASK_ID;
    return -1;
  }

  f = kern_fsave();
  if (proc_table[p].control & FREEZE_ACTIVATION) {
    frozen = proc_table[p].frozen_activations;
    proc_table[p].frozen_activations = 0;
    proc_table[p].control &= ~FREEZE_ACTIVATION;
  }
  kern_frestore(f);

  return frozen;
}
/shark/tags/rel_0_2/kernel/mqueue.c
0,0 → 1,773
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: mqueue.c,v 1.2 2002-11-11 08:34:09 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:34:09 $
------------
 
POSIX message queues
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
#include <mqueue.h>
#include <ll/string.h>
#include <kernel/types.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <errno.h>
#include <stdarg.h>
#include <pthread.h>
#include <sys/types.h>
 
/* some flags... */
#define MQ_USED 1
#define MQ_NONBLOCK 2
#define MQ_NOTIFICATION_PRESENT 4
 
static int mq_once = 1;
 
struct mq_elem {
unsigned int mq_prio; /* the priority of a message */
ssize_t msglen; /* the length of a message */
int next; /* the priority queue */
};
 
/* Semaphores descriptor tables */
static struct mq_des {
char *name; /* a name */
int flags; /* flags... */
 
long maxmsg; /* maximum number of messages */
long msgsize; /* Maximum message size */
 
long count; /* Number of messages currently queued */
long start; /* first not-empty message */
 
BYTE *mq_data; /* the data... */
struct mq_elem *mq_info; /* the priorities */
int mq_first; /* the first empty message */
 
struct sigevent notification; /* the notification, valid only if the
correct bit is set */
 
/* the blocked processes queues */
IQUEUE blocked_send;
IQUEUE blocked_rcv;
 
int next; /* the mq queue */
} mq_table[MQ_OPEN_MAX];
 
 
/* this -IS- an extension to the proc_table!!! */
static struct {
int intsig; /* Normally it is =0, -1 only when a task is woken up
by a signal */
int mqdes; /* message queue on which a task is blocked (meaningless
if the task is not blocked...) */
} mqproc_table[MAX_PROC];
 
static int free_mq; /* Queue of free sem */
 
/*+
 Open (and possibly create) a POSIX message queue.
 oflag must include O_RDWR; when O_CREAT is given, the variadic
 arguments are (mode_t mode, struct mq_attr *attr): mode is currently
 ignored and a NULL attr selects MQ_DEFAULT_MAXMSG/MQ_DEFAULT_MSGSIZE.
 Returns the queue descriptor, or -1 with errno set to EEXIST, ENOENT,
 EACCES or ENOSPC.
+*/
mqd_t mq_open(const char *name, int oflag, ...)
{
  int i;
  int found = 0;
  mode_t m;
  mqd_t mq;
  struct mq_attr *attr;

  kern_cli();

  /* search for an already-open queue with this name */
  for (i = 0; i < MQ_OPEN_MAX; i++)
    if (mq_table[i].flags & MQ_USED) {
      if (strcmp((char*)name, mq_table[i].name) == 0) {
        found = 1;
        break;
      }
    }
  if (found) {
    /* BUGFIX: test the O_CREAT and O_EXCL bits individually; the
       original compared the whole oflag word against O_CREAT|O_EXCL,
       so e.g. O_CREAT|O_EXCL|O_RDWR did not report EEXIST as POSIX
       requires */
    if ((oflag & O_CREAT) && (oflag & O_EXCL)) {
      errno = EEXIST;
      kern_sti();
      return -1;
    } else {
      kern_sti();
      return i;
    }
  } else {
    if (!(oflag & O_CREAT)) {
      errno = ENOENT;
      kern_sti();
      return -1;
    } else if (!(oflag & O_RDWR)) {
      /* this implementation only supports read/write queues */
      errno = EACCES;
      kern_sti();
      return -1;
    } else {
      va_list l;

      va_start(l, oflag);
      m = va_arg(l, mode_t);                /* mode: accepted but unused */
      attr = va_arg(l, struct mq_attr *);
      va_end(l);

      mq = free_mq;
      if (mq != -1) {
        mq_table[mq].name = kern_alloc(strlen((char *)name)+1);
        if (!mq_table[mq].name) {
          errno = ENOSPC;
          kern_sti();
          return -1;
        }
        strcpy(mq_table[mq].name, (char *)name);

        /* queue attributes: from attr when given, defaults otherwise */
        if (attr) {
          mq_table[mq].maxmsg  = attr->mq_maxmsg;
          mq_table[mq].msgsize = attr->mq_msgsize;
        }
        else {
          mq_table[mq].maxmsg  = MQ_DEFAULT_MAXMSG;
          mq_table[mq].msgsize = MQ_DEFAULT_MSGSIZE;
        }
        iq_init(&mq_table[mq].blocked_send, &freedesc, 0);
        iq_init(&mq_table[mq].blocked_rcv, &freedesc, 0);

        mq_table[mq].count = 0;
        mq_table[mq].start = -1;
        mq_table[mq].mq_first = 0;

        if (oflag & O_NONBLOCK)
          mq_table[mq].flags = MQ_USED | MQ_NONBLOCK;
        else
          mq_table[mq].flags = MQ_USED;

        /* message payload storage */
        mq_table[mq].mq_data = (BYTE *)
          kern_alloc(mq_table[mq].maxmsg * mq_table[mq].msgsize);
        if (!mq_table[mq].mq_data) {
          kern_free(mq_table[mq].name,strlen((char *)name)+1);

          errno = ENOSPC;
          kern_sti();
          return -1;
        }

        /* per-message bookkeeping (priority, length, next link) */
        mq_table[mq].mq_info = (struct mq_elem *)
          kern_alloc(mq_table[mq].maxmsg * sizeof(struct mq_elem));
        if (!mq_table[mq].mq_info) {
          kern_free(mq_table[mq].name,strlen((char *)name)+1);
          kern_free(mq_table[mq].mq_data,
                    mq_table[mq].maxmsg * mq_table[mq].msgsize);

          errno = ENOSPC;
          kern_sti();
          return -1;
        }

        /* set up the free-element list */
        for (i=0; i<mq_table[mq].maxmsg-1; i++)
          mq_table[mq].mq_info[i].next = i+1;
        mq_table[mq].mq_info[mq_table[mq].maxmsg-1].next = -1;
        mq_table[mq].mq_first = 0;

        /* remove the descriptor from the free list */
        free_mq = mq_table[mq].next;
        kern_sti();
        return mq;
      }
      else {
        /* no free queue descriptors */
        errno = ENOSPC;
        kern_sti();
        return -1;
      }
    }
  }
}
 
/* Close the message queue descriptor mqdes: release its name, its
   payload storage and its bookkeeping table, and return the
   descriptor to the free list. Returns 0 on success, -1 with
   errno = EBADF on an invalid or unused descriptor. */
int mq_close(mqd_t mqdes)
{
  struct mq_des *q;

  kern_cli();

  if (mqdes < 0 || mqdes >= MQ_OPEN_MAX) {
    errno = EBADF;
    kern_sti();
    return -1;
  }

  q = &mq_table[mqdes];
  if (!(q->flags & MQ_USED)) {
    errno = EBADF;
    kern_sti();
    return -1;
  }

  /* release all the memory owned by the queue */
  kern_free(q->name, strlen(q->name)+1);
  kern_free(q->mq_data, q->maxmsg * q->msgsize);
  kern_free(q->mq_info, q->maxmsg * sizeof(struct mq_elem));

  /* put the descriptor back into the free list */
  q->flags = 0;
  q->next = free_mq;
  free_mq = mqdes;

  kern_sti();
  return 0;
}
 
/*+
 Remove the message queue identified by name, releasing all its
 resources and returning its descriptor to the free list.
 Returns 0 on success, -1 with errno = ENOENT if no queue has the
 given name.
 NOTE: the queue is destroyed immediately, even if other tasks still
 have it open.
+*/
int mq_unlink(const char *name)
{
  int i;
  int found = 0;

  kern_cli();

  for (i = 0; i < MQ_OPEN_MAX; i++)
    if (mq_table[i].flags & MQ_USED) {
      if (strcmp((char*)name, mq_table[i].name) == 0) {
        found = 1;
        /* BUGFIX: stop at the match. The original loop kept running,
           so i always ended up equal to MQ_OPEN_MAX and the code
           below accessed (and freed through) mq_table[MQ_OPEN_MAX],
           one past the end of the table. */
        break;
      }
    }

  if (found) {
    kern_free(mq_table[i].name, strlen((char *)name)+1);
    kern_free(mq_table[i].mq_data,
              mq_table[i].maxmsg * mq_table[i].msgsize);
    kern_free(mq_table[i].mq_info,
              mq_table[i].maxmsg * sizeof(struct mq_elem));
    mq_table[i].flags = 0;
    mq_table[i].next = free_mq;
    free_mq = i;
    kern_sti();
    return 0;
  } else {
    errno = ENOENT;
    kern_sti();
    return -1;
  }
}
 
/* this function inserts a message in a message queue maintaining the
   priority order */
/* Link message "newmsg" into the message list of queue mqdes, keeping
   the list sorted by decreasing mq_prio. A new message is placed after
   the existing messages of equal priority, so extraction from the head
   is FIFO within each priority. */
static void insert_mq_entry(mqd_t mqdes, int newmsg)
{
  struct mq_des *q = &mq_table[mqdes];
  unsigned int newprio = q->mq_info[newmsg].mq_prio;
  int prev = NIL;
  int cur = q->start;

  /* walk past every message whose priority is >= ours */
  while (cur != NIL && q->mq_info[cur].mq_prio >= newprio) {
    prev = cur;
    cur = q->mq_info[cur].next;
  }

  /* splice newmsg between prev and cur */
  if (prev == NIL)
    q->start = newmsg;
  else
    q->mq_info[prev].next = newmsg;
  q->mq_info[newmsg].next = cur;
}
 
 
 
 
 
 
/* this is the test that is done when a task is being killed
   while it is blocked on a message queue */
/* Cancellation hook registered via register_cancellation_point().
   If task i is blocked on a message queue (sending or receiving),
   remove it from the queue's blocked list and reinsert it into its
   scheduling level so that it reaches task_testcancel() and dies.
   Returns 1 if the task was handled here, 0 otherwise. */
static int mq_cancellation_point(PID i, void *arg)
{
LEVEL l;

if (proc_table[i].status == WAIT_MQSEND) {
/* the task that has to be killed is blocked on a mq_send */

/* we have to extract the task from the blocked queue... */
iq_extract(i,&mq_table[mqproc_table[i].mqdes].blocked_send);

/* and the task have to be reinserted into the ready queues, so it
will fall into task_testcancel */
l = proc_table[i].task_level;
level_table[l]->task_insert(l,i);

return 1;
}

if (proc_table[i].status == WAIT_MQRECEIVE) {
/* the task that has to be killed is blocked on a mq_receive */

/* we have to extract the task from the blocked queue... */
iq_extract(i, &mq_table[mqproc_table[i].mqdes].blocked_rcv);

/* and the task have to be reinserted into the ready queues, so it
will fall into task_testcancel */
l = proc_table[i].task_level;
level_table[l]->task_insert(l,i);

return 1;
}

return 0;
}
 
/* Signal hook registered via register_interruptable_point().
   If task i is blocked on a message queue when a signal arrives,
   flag the interruption, remove the task from the blocked list and
   reinsert it into its scheduling level so the blocked call returns
   EINTR. Returns 1 if the task was handled here, 0 otherwise.
   NOTE(review): the intsig flag is set on mqproc_table[exec_shadow]
   while the task being extracted is i — confirm exec_shadow == i in
   every calling context, otherwise this should be
   mqproc_table[i].intsig. */
int mq_interrupted_by_signal(PID i, void *arg)
{
LEVEL l;

if (proc_table[i].status == WAIT_MQSEND) {
/* the task is blocked on a mq_send and it is receiving a
signal... */
mqproc_table[exec_shadow].intsig = 1;

/* we have to extract the task from the blocked queue... */
iq_extract(i, &mq_table[mqproc_table[i].mqdes].blocked_send);

/* and the task have to be reinserted into the ready queues, so it
will fall into task_testcancel */
l = proc_table[i].task_level;
level_table[l]->task_insert(l,i);

return 1;
}

if (proc_table[i].status == WAIT_MQRECEIVE) {
/* the task is blocked on a mq_receive and it is receiving a
signal... */
mqproc_table[exec_shadow].intsig = 1;

/* we have to extract the task from the blocked queue... */
iq_extract(i, &mq_table[mqproc_table[i].mqdes].blocked_rcv);

/* and the task have to be reinserted into the ready queues, so it
will fall into task_testcancel */
l = proc_table[i].task_level;
level_table[l]->task_insert(l,i);

return 1;
}

return 0;
}
 
 
 
 
 
/* Send a message of msg_len bytes with priority msg_prio on queue
   mqdes. Blocks when the queue is full (unless non-blocking mode is
   set), wakes up a blocked receiver when the queue goes from empty to
   non-empty, and fires the mq_notify() registration when no receiver
   is waiting. This is a cancellation point.
   Returns 0 on success, -1 with errno set (EBADF, EMSGSIZE, EINVAL,
   EAGAIN, EINTR). */
int mq_send(mqd_t mqdes, const char *msg_ptr, size_t msg_len,
unsigned int msg_prio)
{
int newmsg;

task_testcancel();

kern_cli();

/* first, if it is the first time that mq_receive or mq_send is called,
register the cancellation point */
if (mq_once) {
mq_once = 0;
register_cancellation_point(mq_cancellation_point, NULL);
register_interruptable_point(mq_interrupted_by_signal, NULL);
}

if (mqdes < 0 ||
mqdes >= MQ_OPEN_MAX ||
!(mq_table[mqdes].flags & MQ_USED) ) {
errno = EBADF;
kern_sti();
return -1;
}

if (msg_len > mq_table[mqdes].msgsize) {
errno = EMSGSIZE;
kern_sti();
return -1;
}

if (msg_prio > MQ_PRIO_MAX) {
errno = EINVAL;
kern_sti();
return -1;
}

/* block the task if necessary */
if (mq_table[mqdes].mq_first == -1) {
/* the message queue is full!!! */
/* NOTE(review): mq_open() records non-blocking mode as MQ_NONBLOCK
in flags, but O_NONBLOCK is tested here — verify the two constants
really coincide, otherwise non-blocking queues block anyway */
if (mq_table[mqdes].flags & O_NONBLOCK) {
errno = EAGAIN;
kern_sti();
return -1;
}
else {
LEVEL l;
struct timespec ty;
TIME tx;

/* we block the task until:
- a message is received, or
- a signal is sent to the task, or
- the task is killed */

mqproc_table[exec_shadow].intsig = 0;

/* SAME AS SCHEDULER... manage the capacity event and the load_info */
ll_gettime(TIME_EXACT, &schedule_time);
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
tx = TIMESPEC2USEC(&ty);
proc_table[exec_shadow].avail_time -= tx;
jet_update_slice(tx);
if (cap_timer != NIL) {
event_delete(cap_timer);
cap_timer = NIL;
}
l = proc_table[exec_shadow].task_level;
level_table[l]->task_extract(l,exec_shadow);

/* we insert the task in the message queue */
proc_table[exec_shadow].status = WAIT_MQSEND;
iq_priority_insert(exec_shadow,&mq_table[mqdes].blocked_send);

/* and finally we reschedule */
exec = exec_shadow = -1;
scheduler();
ll_context_to(proc_table[exec_shadow].context);
kern_deliver_pending_signals();

/* mq_send is a cancellation point... */
task_testcancel();

/* woken up by a signal rather than by a receiver? */
if (mqproc_table[exec_shadow].intsig) {
errno = EINTR;
kern_sti();
return -1;
}
}
}

/* Now there is space to insert a new message */
/* alloc a descriptor from the free-element list */
newmsg = mq_table[mqdes].mq_first;
mq_table[mqdes].mq_first = mq_table[mqdes].mq_info[newmsg].next;
mq_table[mqdes].count++;

/* fill the data */
memcpy(mq_table[mqdes].mq_data + newmsg * mq_table[mqdes].msgsize,
msg_ptr, msg_len);
mq_table[mqdes].mq_info[ newmsg ].mq_prio = msg_prio;
mq_table[mqdes].mq_info[ newmsg ].msglen = msg_len;

/* insert the data in an ordered way (priority order) */
insert_mq_entry(mqdes, newmsg);

// kern_printf("Ûmq_des=%d, newmsg=%d, count=%dÛ",
// mqdes, newmsg, mq_table[mqdes].count);

if (mq_table[mqdes].count == 1) {
/* the mq was empty: either wake a blocked receiver or fire the
notification registered with mq_notify() */
PID p;

p = iq_getfirst(&mq_table[mqdes].blocked_rcv);

if ( p != NIL) {
/* The first blocked task has to be woken up */
LEVEL l;

proc_table[exec_shadow].context = ll_context_from();

l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);

/* Preempt if necessary */
scheduler();
kern_context_load(proc_table[exec_shadow].context);
return 0;
}
else if (mq_table[mqdes].flags & MQ_NOTIFICATION_PRESENT) {
/* the notification is one-shot: clear it before delivering */
mq_table[mqdes].flags &= ~MQ_NOTIFICATION_PRESENT;

// manage the notification...
if (mq_table[mqdes].notification.sigev_notify == SIGEV_SIGNAL) {
// there is no signal pending... post the signal!!!
sigqueue_internal(0,
mq_table[mqdes].notification.sigev_signo,
mq_table[mqdes].notification.sigev_value,
SI_MESGQ);
} else if (mq_table[mqdes].notification.sigev_notify == SIGEV_THREAD) {
/* a new thread must be created; note that the pthread_create
calls task_createn and task_activate; if task_activate is called
into signal handlers and calls event_need_reschedule */
pthread_t new_thread;
if (mq_table[mqdes].notification.sigev_notify_attributes)
pthread_create(&new_thread,
mq_table[mqdes].notification.sigev_notify_attributes,
(void *(*)(void *))mq_table[mqdes].notification.sigev_notify_function,
mq_table[mqdes].notification.sigev_value.sival_ptr);
else {
pthread_attr_t new_attr;
// the task must be created detached
pthread_attr_init(&new_attr);
pthread_attr_setdetachstate(&new_attr, PTHREAD_CREATE_DETACHED);
pthread_create(&new_thread,
&new_attr,
(void *(*)(void *))mq_table[mqdes].notification.sigev_notify_function,
&mq_table[mqdes].notification.sigev_value);
}
}
}
}

kern_sti();
return 0;
}
 
/*+
 Receive the highest-priority (FIFO within priority) message from
 queue mqdes into msg_ptr. Blocks when the queue is empty (unless
 non-blocking mode is set). On success the message priority is stored
 in *msg_prio (if non-NULL) and the message length is returned; on
 error -1 is returned with errno set (EBADF, EMSGSIZE, EAGAIN, EINTR).
 This is a cancellation point.
+*/
ssize_t mq_receive(mqd_t mqdes, char *msg_ptr, size_t msg_len,
                   unsigned int *msg_prio)
{
  int msg;
  PID p;
  ssize_t returnvalue;

  task_testcancel();

  kern_cli();

  /* lazy one-time registration of the kill/signal hooks shared with
     mq_send */
  if (mq_once) {
    mq_once = 0;
    register_cancellation_point(mq_cancellation_point, NULL);
    register_interruptable_point(mq_interrupted_by_signal, NULL);
  }

  if (mqdes < 0 ||
      mqdes >= MQ_OPEN_MAX ||
      !(mq_table[mqdes].flags & MQ_USED) ) {
    errno = EBADF;
    kern_sti();
    return -1;
  }

  /* NOTE(review): POSIX requires msg_len >= mq_msgsize, and the copy
     below always transfers msgsize bytes — confirm the intended
     buffer contract with the callers */
  if (msg_len > mq_table[mqdes].msgsize) {
    errno = EMSGSIZE;
    kern_sti();
    return -1;
  }

  /* block the task if the queue is empty */
  if (mq_table[mqdes].start == -1) {
    /* NOTE(review): mq_open() records non-blocking mode as MQ_NONBLOCK,
       but O_NONBLOCK is tested here — verify the constants coincide */
    if (mq_table[mqdes].flags & O_NONBLOCK) {
      errno = EAGAIN;
      kern_sti();
      return -1;
    }
    else {
      LEVEL l;
      struct timespec ty;
      TIME tx;

      /* we block the task until:
         - a message arrives, or
         - a signal is sent to the task, or
         - the task is killed */

      mqproc_table[exec_shadow].intsig = 0;

      /* SAME AS SCHEDULER... manage the capacity event and the load_info */
      ll_gettime(TIME_EXACT, &schedule_time);
      SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
      tx = TIMESPEC2USEC(&ty);
      proc_table[exec_shadow].avail_time -= tx;
      jet_update_slice(tx);
      if (cap_timer != NIL) {
        event_delete(cap_timer);
        cap_timer = NIL;
      }
      l = proc_table[exec_shadow].task_level;
      level_table[l]->task_extract(l,exec_shadow);

      /* we insert the task into the message queue */
      proc_table[exec_shadow].status = WAIT_MQRECEIVE;
      iq_priority_insert(exec_shadow,&mq_table[mqdes].blocked_rcv);

      /* and finally we reschedule */
      exec = exec_shadow = -1;
      scheduler();
      ll_context_to(proc_table[exec_shadow].context);
      kern_deliver_pending_signals();

      /* mq_receive is a cancellation point... */
      task_testcancel();

      /* woken up by a signal rather than by a sender? */
      if (mqproc_table[exec_shadow].intsig) {
        errno = EINTR;
        kern_sti();
        return -1;
      }
    }
  }

  /* Now there is at least one message...
     copy it to the destination, ... */
  msg = mq_table[mqdes].start;
  memcpy(msg_ptr,
         mq_table[mqdes].mq_data + msg * mq_table[mqdes].msgsize,
         mq_table[mqdes].msgsize);

  /* ...unlink it, updating the first message and the counter
     (BUGFIX: the counter was incremented here; extracting a message
     must decrement it, otherwise count drifts upward and the
     "queue just became non-empty" logic in mq_send breaks), ... */
  mq_table[mqdes].count--;
  mq_table[mqdes].start = mq_table[mqdes].mq_info[ msg ].next;
  /* ...and finally give the slot back to the free message list */
  mq_table[mqdes].mq_info[ msg ].next = mq_table[mqdes].mq_first;
  mq_table[mqdes].mq_first = msg;

  /* return the priority if required */
  if (msg_prio) {
    *msg_prio = mq_table[mqdes].mq_info[ msg ].mq_prio;
  }

  /* set the returnvalue */
  returnvalue = mq_table[mqdes].mq_info[ msg ].msglen;

  /* if the mq was full, there may be a task in the blocked-send queue:
     wake the first one up */
  p = iq_getfirst(&mq_table[mqdes].blocked_send);

  if ( p != NIL) {
    /* The first blocked task on send has to be woken up */
    LEVEL l;

    proc_table[exec_shadow].context = ll_context_from();

    l = proc_table[p].task_level;
    level_table[l]->task_insert(l,p);

    /* Preempt if necessary */
    scheduler();
    kern_context_load(proc_table[exec_shadow].context);
    return returnvalue;
  }

  kern_sti();
  return returnvalue;
}
 
/*+
 Register (notification != NULL) or remove (notification == NULL) the
 asynchronous notification of queue mqdes, delivered by mq_send()
 when a message arrives on an empty queue with no blocked receiver.
 Returns 0 on success, -1 with errno = EBADF (bad descriptor) or
 EBUSY (a notification is already registered).
+*/
int mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
  kern_cli();

  if (mqdes < 0 ||
      mqdes >= MQ_OPEN_MAX ||
      !(mq_table[mqdes].flags & MQ_USED) ) {
    errno = EBADF;
    kern_sti();
    return -1;
  }

  if (mq_table[mqdes].flags & MQ_NOTIFICATION_PRESENT) {
    if (!notification) {
      /* remove the current registration */
      mq_table[mqdes].flags &= ~MQ_NOTIFICATION_PRESENT;
      kern_sti();
      return 0;
    }
    else {
      errno = EBUSY;
      kern_sti();
      return -1;
    }
  }

  /* BUGFIX: a NULL notification with no registration present used to
     fall through to the memcpy below and dereference NULL; removing a
     non-existing registration is simply a no-op */
  if (!notification) {
    kern_sti();
    return 0;
  }

  mq_table[mqdes].flags |= MQ_NOTIFICATION_PRESENT;

  memcpy(&mq_table[mqdes].notification, notification,sizeof(struct sigevent));

  kern_sti();
  return 0;
}
 
/*+
 Set the O_NONBLOCK flag of queue mqdes from mqstat->mq_flags and,
 if omqstat is non-NULL, return the previous attributes there.
 Returns 0 on success, -1 with errno = EBADF (bad descriptor) or
 EINVAL (NULL mqstat).
 NOTE(review): the non-blocking state is stored here as the O_NONBLOCK
 bit, while mq_open() stores MQ_NONBLOCK — verify the two constants
 coincide.
+*/
int mq_setattr(mqd_t mqdes, const struct mq_attr *mqstat,
               struct mq_attr *omqstat)
{
  kern_cli();

  if (mqdes < 0 ||
      mqdes >= MQ_OPEN_MAX ||
      !(mq_table[mqdes].flags & MQ_USED) ) {
    errno = EBADF;
    kern_sti();
    return -1;
  }

  /* BUGFIX: mqstat was dereferenced unconditionally below; reject a
     NULL pointer instead of crashing */
  if (!mqstat) {
    errno = EINVAL;
    kern_sti();
    return -1;
  }

  /* return the old attributes if requested */
  if (omqstat) {
    omqstat->mq_flags = mq_table[mqdes].flags & O_NONBLOCK;
    omqstat->mq_maxmsg = mq_table[mqdes].maxmsg;
    omqstat->mq_msgsize = mq_table[mqdes].msgsize;
    omqstat->mq_curmsgs = mq_table[mqdes].count;
  }

  /* only the O_NONBLOCK bit can be changed */
  mq_table[mqdes].flags = (mq_table[mqdes].flags & ~O_NONBLOCK) |
                          (mqstat->mq_flags & O_NONBLOCK);
  kern_sti();
  return 0;
}
 
/* Fill *mqstat with the current attributes of queue mqdes (flags,
   maximum number of messages, message size, current message count).
   Returns 0 on success, -1 with errno = EBADF on an invalid or
   unused descriptor. */
int mq_getattr(mqd_t mqdes, struct mq_attr *mqstat)
{
  struct mq_des *q;

  kern_cli();

  if (mqdes < 0 || mqdes >= MQ_OPEN_MAX) {
    errno = EBADF;
    kern_sti();
    return -1;
  }

  q = &mq_table[mqdes];
  if (!(q->flags & MQ_USED)) {
    errno = EBADF;
    kern_sti();
    return -1;
  }

  mqstat->mq_flags   = q->flags & O_NONBLOCK;
  mqstat->mq_maxmsg  = q->maxmsg;
  mqstat->mq_msgsize = q->msgsize;
  mqstat->mq_curmsgs = q->count;

  kern_sti();
  return 0;
}
/shark/tags/rel_0_2/kernel/printk.c
0,0 → 1,114
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/*
* Copyright (C) 1999 Massimiliano Giorgi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
/*
* CVS : $Id: printk.c,v 1.2 2002-10-28 07:56:49 pj Exp $
*
* File: $File$
* Revision: $Revision: 1.2 $
* Last update: $Date: 2002-10-28 07:56:49 $
*/
 
#include <ll/i386/cons.h>
#include <ll/stdarg.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <kernel/log.h>
#include <kernel/func.h>
 
static int printklevel=0;
 
static char *levelname[]={
"emerg ",
"alert ",
"crit ",
"err ",
"warn ",
"notice",
"info ",
"debug "
};
 
/* Shared backend of printk()/printkboot().
   The format may begin with a "<n>" priority prefix (n in the range
   LOG_EMERG..LOG_DEBUG); without one, or with an out-of-range value,
   LOG_INFO is assumed. Messages with level <= printklevel are
   suppressed. flag != 0 (printkboot) disables the automatic trailing
   newline.
   NOTE(review): the message is formatted with vsprintf into a fixed
   1024-byte static buffer with no bound check, and the buffer is
   shared between callers (only the cprintf section is protected by
   kern_fsave) — see the DANGER note below. */
static int vprintk(int flag, char *fmt, va_list ap)
{
static char buf[1024]; /* DANGER !!!!! */
SYS_FLAGS f;
int level;
int result;
/* parse the optional "<n>" priority prefix */
result=sscanf(fmt,"<%i>",&level);
if (result==1) {
if (level<LOG_EMERG||level>LOG_DEBUG) level=LOG_INFO;
/* NOTE(review): skipping 3 characters assumes a single-digit level */
fmt+=3;
} else {
level=LOG_INFO;
}
if (level<=printklevel) return 0;
vsprintf(buf,(char*)fmt,ap);
/* remember whether the message already ends with a newline */
result=(strchr(fmt,'\n')!=NULL);
 
f=kern_fsave();
cprintf("[%s] %s",levelname[level],buf);
/* if we called printk, and the string does not have a \n in it, add it */
if ((!flag)&&(!result)) cprintf("\n");
kern_frestore(f);
return 0;
}
 
/* Kernel logging entry point. The format may start with a "<level>"
   prefix (see vprintk); a newline is appended when the format does
   not already contain one. */
int printk(const char *fmt, ...)
{
  int ret;
  va_list args;

  va_start(args, fmt);
  ret = vprintk(0, (char *)fmt, args);
  va_end(args);

  return ret;
}
 
/* Boot-time logging entry point: same as printk() but never appends
   an automatic trailing newline (flag = 1 to vprintk). */
int printkboot(const char *fmt, ...)
{
  int ret;
  va_list args;

  va_start(args, fmt);
  ret = vprintk(1, (char *)fmt, args);
  va_end(args);

  return ret;
}
/shark/tags/rel_0_2/kernel/conditio.c
0,0 → 1,398
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: conditio.c,v 1.2 2002-11-11 08:34:08 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:34:08 $
------------
 
This file contains the condition variables handling functions.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
 
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <errno.h>
#include <kernel/iqueue.h>
 
/*---------------------------------------------------------------------*/
/* Condition variables */
/*---------------------------------------------------------------------*/
 
/* 1 until the first cond_init() registers the cancellation point below */
static int condition_once = 1;
 
/* this is the test that is done when a task is being killed
and it is waiting on a condition */
/* Cancellation hook registered by cond_init(): if task i is blocked in
 * WAIT_COND, extract it from the condition's waiters queue, reset its
 * cond_waiting pointer and reinsert it into its scheduling level so it
 * can reacquire the mutex and die.
 * Returns 1 if the task was handled here, 0 otherwise.
 */
static int condition_cancellation_point(PID i, void *arg)
{
LEVEL l;

if (proc_table[i].status == WAIT_COND) {
/* if the task is waiting on a condition variable, we have to extract it
from the waiters queue, then set the KILL_REQUEST flag, and reinsert
the task into the ready queue so it can reaquire the mutex and die */
iq_extract(i,&proc_table[i].cond_waiting->waiters);
/* last waiter gone: the condition is no longer bound to a mutex */
if (iq_isempty(&proc_table[i].cond_waiting->waiters))
proc_table[i].cond_waiting->used_for_waiting = NULL;
proc_table[i].cond_waiting = NULL;

l = proc_table[i].task_level;
level_table[l]->task_insert(l,i);
/* then, the kill_request flag is set, and when the task is rescheduled
it autokill itself... */

return 1;
}

return 0;
}
 
 
/* Initialize a condition variable: empty waiters queue, not yet bound
   to any mutex. The kill-time cancellation hook is registered on the
   very first call. Always returns 0. */
int cond_init(cond_t *cond)
{
  iq_init(&cond->waiters, &freedesc, 0);
  cond->used_for_waiting = NULL;

  /* one-shot registration of the cancellation point */
  if (condition_once) {
    condition_once = 0;
    register_cancellation_point(condition_cancellation_point, NULL);
  }

  return 0;
}
 
/* Destroy a condition variable.
   Returns 0 on success, EBUSY if tasks are still waiting on it. */
int cond_destroy(cond_t *cond)
{
  if (iq_isempty(&cond->waiters))
    return 0;

  return (EBUSY);
}
 
/* Wake up the first task blocked on the condition (as returned by
 * iq_getfirst): it is reinserted into its scheduling level and the
 * scheduler is invoked. No-op if nobody is waiting. Always returns 0.
 */
int cond_signal(cond_t *cond)
{
LEVEL l;
PID p;

proc_table[exec_shadow].context = kern_context_save();

if (!iq_isempty(&cond->waiters)) {
p = iq_getfirst(&cond->waiters);

l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);

scheduler();
}

kern_context_load(proc_table[exec_shadow].context);
return 0;
}
 
/* Wake up ALL the tasks blocked on the condition: each is reinserted
 * into its own scheduling level, then the scheduler runs once.
 * Always returns 0.
 */
int cond_broadcast(cond_t *cond)
{
LEVEL l;
PID p;

proc_table[exec_shadow].context = kern_context_save();

if (!iq_isempty(&cond->waiters)) {
do {
p = iq_getfirst(&cond->waiters);
l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);
} while(!iq_isempty(&cond->waiters));

scheduler();
}
kern_context_load(proc_table[exec_shadow].context);
return 0;
}
 
/* Atomically release the mutex and block the caller on the condition.
 * All concurrent waiters of a condition must pass the same mutex
 * (EINVAL otherwise). On wakeup the mutex is reacquired before return.
 * This is a cancellation point (task_testcancel on entry and exit).
 * Returns 0 on success, EINVAL on mutex mismatch or unlock failure.
 */
int cond_wait(cond_t *cond, mutex_t *mutex)
{
LEVEL l;
struct timespec ty;
TIME tx;

/* Why I used task_nopreempt???... because we have to unlock the mutex,
and we can't call mutex_unlock after kern_context_save (the unlock
could call context_save itself...) */
task_nopreempt();

/* First, the cond_wait is a cancellation point... */
task_testcancel();

/* all the task that uses NOW this condition are waiting on the same
mutex??? */
if (cond->used_for_waiting) {
if (cond->used_for_waiting != mutex) {
task_preempt();
return (EINVAL);
}
}
else
cond->used_for_waiting = mutex;

/* If the task is not canceled with testcancel, we block it now... */

/* The mutex can't be destroyed while we are waiting on a condition,
so we tell the mutex that a task is using it althought it is not
busy (the cond_wait have to unlock the mutex!!!)... */
mutex->use++;
if (mutex_unlock(mutex)) {
/* some problems unlocking the mutex... */
mutex->use--;
task_preempt();
return (EINVAL);
}

/* now, we really block the task... */
proc_table[exec_shadow].context = kern_context_save();

/* SAME AS SCHEDULER... manage the capacity event and the load_info */
ll_gettime(TIME_EXACT, &schedule_time);
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
tx = TIMESPEC2USEC(&ty);
proc_table[exec_shadow].avail_time -= tx;
jet_update_slice(tx);
if (cap_timer != NIL) {
event_delete(cap_timer);
cap_timer = NIL;
}

l = proc_table[exec_shadow].task_level;
level_table[l]->task_extract(l,exec_shadow);

/* we insert the task in the condition queue */
proc_table[exec_shadow].status = WAIT_COND;
iq_priority_insert(exec_shadow,&cond->waiters);

/* then, we set into the processor descriptor the condition on that
the task is blocked... (if the task is killed while it is waiting
on the condition, we have to remove it from the waiters queue, so
we need the condition variable...) */
proc_table[exec_shadow].cond_waiting = cond;

/* and finally we reschedule */
exec = exec_shadow = -1;
scheduler();
ll_context_to(proc_table[exec_shadow].context);
kern_sti();

/* when we arrive here:
- the task did't die while it was waiting on the condition
(normally, the cancelability state is set to deferred;
if someone kills the task, we have first to lock the mutex,
and then die. Furthermore no cond_signal can be catched by a task
that have to die, so in the makefree function we extract the
task from the waiters queue)
- the task still in the non-preemptive state
- the task have to reaquire the mutex to test again the condition
- the task have to reset the cond_waiting pointer set before
*/
if (proc_table[exec_shadow].cond_waiting != NULL) {
proc_table[exec_shadow].cond_waiting = NULL;

/* last waiter gone: unbind the condition from the mutex */
if (iq_isempty(&cond->waiters)) cond->used_for_waiting = NULL;
}
task_preempt();

mutex_lock(mutex);
mutex->use--;

task_testcancel();

return 0;
}
 
 
/* if this event fires the task passed as argument is blocked on a condition
with a cond_timedwait.
We have to:
- extract the task from the waiters queue, because the task has to be
woken up and must not use any cond_signal
- reset the delaytimer...
- call the task-insert...
*/
/* Timeout event for cond_timedwait(), run at event (interrupt) time:
 * the task is removed from the condition's waiters queue so no later
 * cond_signal can reach it, the timer handle is reset, and the task is
 * reinserted into its scheduling level.
 */
void condition_timer(void *arg)
{
PID p = (PID)arg;
LEVEL l;

iq_extract(p,&proc_table[p].cond_waiting->waiters);
/* last waiter gone: unbind the condition from the mutex */
if (iq_isempty(&proc_table[p].cond_waiting->waiters))
proc_table[p].cond_waiting->used_for_waiting = NULL;
proc_table[p].cond_waiting = NULL;

/* mark the timer as fired so cond_timedwait will not delete it */
proc_table[p].delay_timer = -1;

l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);

event_need_reschedule();
}
 
/* As cond_wait(), but with an absolute timeout: a kernel event
 * (condition_timer) wakes the task at abstime if no signal arrived.
 * Returns 0 on success, ETIMEDOUT if the timer fired (detected through
 * cond_waiting == NULL), EINVAL on mutex mismatch or unlock failure.
 * Cancellation point on entry and exit.
 */
int cond_timedwait(cond_t *cond, mutex_t *mutex,
const struct timespec *abstime)
{
LEVEL l;
int returnvalue = 0;
struct timespec ty;
TIME tx;

/* Why I used task_nopreempt???... because we have to unlock the mutex,
and we can't call mutex_unlock after kern_context_save (the unlock
could call context_save itself...) */
task_nopreempt();

/* First, the cond_wait is a cancellation point... */
task_testcancel();

/* all the task that uses NOW this condition are waiting on the same
mutex??? */
if (cond->used_for_waiting) {
if (cond->used_for_waiting != mutex) {
task_preempt();
return (EINVAL);
}
}
else
cond->used_for_waiting = mutex;

/* If the task is not canceled with testcancel, we block it now... */

/* The mutex can't be destroyed while we are waiting on a condition,
so we tell the mutex that a task is using it althought it is not
busy (the cond_wait have to unlock the mutex!!!)... */
mutex->use++;
if (mutex_unlock(mutex)) {
/* some problems unlocking the mutex... */
mutex->use--;
task_preempt();
return (EINVAL);
}

/* now, we really block the task... */
proc_table[exec_shadow].context = kern_context_save();

/* SAME AS SCHEDULER... manage the capacity event and the load_info */
ll_gettime(TIME_EXACT, &schedule_time);
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
tx = TIMESPEC2USEC(&ty);
proc_table[exec_shadow].avail_time -= tx;
jet_update_slice(tx);
if (cap_timer != NIL) {
event_delete(cap_timer);
cap_timer = NIL;
}

l = proc_table[exec_shadow].task_level;
level_table[l]->task_extract(l,exec_shadow);

/* we insert the task in the condition queue */
proc_table[exec_shadow].status = WAIT_COND;
iq_priority_insert(exec_shadow,&cond->waiters);

/* then, we set into the processor descriptor the condition on that
the task is blocked... (if the task is killed while it is waiting
on the condition, we have to remove it from the waiters queue, so
we need the condition variable...) */
proc_table[exec_shadow].cond_waiting = cond;

/* we can use the delaytimer because if we are here we are not in a
task_delay */
proc_table[exec_shadow].delay_timer =
kern_event_post(abstime,condition_timer,(void *)exec_shadow);

/* and finally we reschedule */
exec = exec_shadow = -1;
scheduler();
ll_context_to(proc_table[exec_shadow].context);

/* woken by a signal (or killed): the timeout event is still pending
   and must be deleted; condition_timer sets delay_timer to -1 */
if (proc_table[exec_shadow].delay_timer != -1)
event_delete(proc_table[exec_shadow].delay_timer);

kern_sti();

/* when we arrive here:
- the task did't die while it was waiting on the condition
(normally, the cancelability state is set to deferred;
if someone kills the task, we have first to lock the mutex,
and then die. Furthermore no cond_signal can be catched by a task
that have to die, so in the makefree function we extract the
task from the waiters queue)
- the task still in the non-preemptive state
- the task have to reaquire the mutex to test again the condition
- the task have to reset the cond_waiting pointer set before
Note that cond_wait has to be called with cancelability set to
deferred... so we insert a testcancel after the mutex_lock...
*/
if (proc_table[exec_shadow].cond_waiting != NULL) {
proc_table[exec_shadow].cond_waiting = NULL;

if (iq_isempty(&cond->waiters)) cond->used_for_waiting = NULL;
}
else
/* cond_waiting == NULL if the task is killed or the timer has fired */
returnvalue = ETIMEDOUT;

task_preempt();

mutex_lock(mutex);
mutex->use--;

task_testcancel();

return returnvalue;
}
/shark/tags/rel_0_2/kernel/exchgrx.c
0,0 → 1,104
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: exchgrx.c,v 1.2 2002-10-28 07:58:04 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:58:04 $
------------
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
#include <kernel/kern.h>
 
/* Exception record: thehandler() stores the first exception here
   (myflag guards against further ones); theend() prints it at exit. */
static int myflag;
static siginfo_t myinfo;
static struct timespec mytime;

static void thehandler(int signo, siginfo_t *info, void *extra);
static void theend(void *arg);
 
/*
This exception handler should be good for text applications that do NOT
use graphics
*/
int set_exchandler_grx(void)
{
struct sigaction action;
 
myflag = 0;
 
sys_atrunlevel(theend, NULL, RUNLEVEL_AFTER_EXIT);
 
/* Init the standard S.Ha.R.K. exception handler */
action.sa_flags = SA_SIGINFO; /* Set the signal action */
action.sa_sigaction = thehandler;
action.sa_handler = 0;
sigfillset(&action.sa_mask); /* we block all the other signals... */
 
return sigaction(SIGHEXC, &action, NULL); /* set the signal */
}
 
/* SIGHEXC handler: record the first exception (siginfo + timestamp)
 * for theend() to print at shutdown, then abort the system. myflag
 * makes this a one-shot: later exceptions are ignored.
 */
void thehandler(int signo, siginfo_t *info, void *extra)
{
  if (!myflag) {
    myflag = 1;
    myinfo = *info;
    sys_gettime(&mytime);  /* BUGFIX: was terminated by `,' (comma
                              operator) instead of `;' */

    sys_abort(AHEXC);
  }
}
 
/* Shutdown hook: print the exception recorded by thehandler(), if any. */
void theend(void *arg)
{
  if (!myflag)
    return;

  kern_printf("\nS.Ha.R.K. Exception raised!!!"
              "\nTime (s:ns) :%ld:%ld"
              "\nException number:%d"
              "\nPID :%d\n",
              mytime.tv_sec,
              mytime.tv_nsec,
              myinfo.si_value.sival_int,
              myinfo.si_task);
}
 
 
 
/shark/tags/rel_0_2/kernel/exchtxt.c
0,0 → 1,88
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: exchtxt.c,v 1.2 2002-11-11 08:34:08 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:34:08 $
------------
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
#include <kernel/kern.h>
 
static void thehandler(int signo, siginfo_t *info, void *extra);
 
/*
This exception handler should be good for text applications that do NOT
use graphics
*/
int set_exchandler_text(void)
{
struct sigaction action;
 
/* Init the standard S.Ha.R.K. exception handler */
action.sa_flags = SA_SIGINFO; /* Set the signal action */
action.sa_sigaction = thehandler;
action.sa_handler = 0;
sigfillset(&action.sa_mask); /* we block all the other signals... */
 
return sigaction(SIGHEXC, &action, NULL); /* set the signal */
}
 
 
/* SIGHEXC handler for text mode: disable interrupts, dump the
 * exception data (time, number, PID) on the console, then abort.
 */
void thehandler(int signo, siginfo_t *info, void *extra)
{
  struct timespec t;

  /* Default action for a kern exception */
  kern_cli();
  ll_gettime(TIME_EXACT, &t);  /* BUGFIX: was terminated by `,' (comma
                                  operator) instead of `;' */
  kern_printf("\nHartik Exception raised!!!"
              "\nTime (s:ns) :%ld:%ld"
              "\nException number:%d"
              "\nPID :%d\n",
              t.tv_sec, t.tv_nsec, info->si_value.sival_int,
              info->si_task);
  sys_abort(AHEXC);
}
 
 
 
 
/shark/tags/rel_0_2/kernel/modules/pc.c
0,0 → 1,506
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: pc.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
------------
 
Priority Ceiling protocol. see pc.h for more details...
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/pc.h>
 
#include <ll/ll.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <kernel/const.h>
#include <modules/codes.h>
#include <sys/types.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
typedef struct PC_mutexstruct_t PC_mutex_t;

/* The PC resource level descriptor */
typedef struct {
mutex_resource_des m; /*+ the mutex interface +*/

int nlocked[MAX_PROC]; /*+ how many mutex a task currently locks +*/

PC_mutex_t *mlist; /*+ the list of the busy mutexes,
                       kept ordered by ceiling (see PC_insert) +*/
DWORD priority[MAX_PROC]; /*+ the PC priority of the tasks in the system +*/

PID blocked[MAX_PROC]; /*+ next links of the per-mutex blocked lists,
                           chained from PC_mutex_t.firstblocked +*/

} PC_mutex_resource_des;

/* this is the structure normally pointed by the opt field in the
mutex_t structure */
struct PC_mutexstruct_t {
PID owner;          /* task owning the mutex, or NIL */
int nblocked;       /* number of tasks blocked on this mutex */
PID firstblocked;   /* head of the blocked list (see blocked[] above) */

DWORD ceiling;      /* priority ceiling of this mutex */
PC_mutex_t *next;   /* links in the level's busy-mutex list */
PC_mutex_t *prev;
};
 
/* Locking test for the Priority Ceiling protocol: the request is
   granted only if prio is below the ceiling of the first busy mutex
   owned by another task (the system ceiling; lev->mlist is kept
   ordered by ceiling, so that mutex is found first).
   Returns 1 if the task can lock the mutex, 0 otherwise. */
static int PC_accept(PC_mutex_resource_des *lev, DWORD prio)
{
  PC_mutex_t *cur;

  for (cur = lev->mlist; cur != NULL; cur = cur->next)
    if (cur->owner != exec_shadow)
      /* cur is owned by another task: its ceiling is the system ceiling */
      return prio < cur->ceiling;

  /* no busy mutexes other than mine!!! */
  return 1;
}
 
/* Insert mutex m into lev->mlist, keeping the list ordered by
   non-decreasing ceiling (same technique as q_insert of queue.c). */
static void PC_insert(PC_mutex_resource_des *lev, PC_mutex_t * m)
{
  PC_mutex_t *before = NULL;
  PC_mutex_t *after = lev->mlist;

  /* find the first node whose ceiling is greater than m's */
  while (after != NULL && m->ceiling >= after->ceiling) {
    before = after;
    after = after->next;
  }

  /* link m between before and after */
  m->prev = before;
  m->next = after;

  if (before != NULL)
    before->next = m;
  else
    lev->mlist = m;

  if (after != NULL)
    after->prev = m;
}
 
/* Unlink mutex m from lev->mlist (doubly-linked list removal,
   same technique as q_extract of queue.c). */
static void PC_extract(PC_mutex_resource_des *lev, PC_mutex_t * m)
{
  if (m->prev == NULL)
    lev->mlist = m->next;       /* m was the head */
  else
    m->prev->next = m->next;

  if (m->next != NULL)
    m->next->prev = m->prev;
}
 
 
 
/*+ print resource protocol statistics...+*/
static void PC_resource_status(RLEVEL r)
{
  PC_mutex_resource_des *lev = (PC_mutex_resource_des *)(resource_table[r]);
  PID p;

  kern_printf("Resources owned by the tasks:\n");
  for (p = 0; p < MAX_PROC; p++)
    kern_printf("%-4d", lev->nlocked[p]);

  kern_printf("\nPC priority of the tasks:\n");
  for (p = 0; p < MAX_PROC; p++)
    kern_printf("%-4ld", lev->priority[p]);

  /* in the future: print the status of the blocked semaphores! */
}
 
 
/* Accept only PC resource models (possibly bound to this level).
   Returns 0 when accepted, -1 otherwise. */
static int PC_level_accept_resource_model(RLEVEL l, RES_MODEL *r)
{
  return (r->rclass == PC_RCLASS || r->rclass == (PC_RCLASS | l)) ? 0 : -1;
}
 
/* Bind task p to the level: record its PC priority from the resource
   model; the task starts with no mutex locked. */
static void PC_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  PC_RES_MODEL *model = (PC_RES_MODEL *)r;
  PC_mutex_resource_des *lev = (PC_mutex_resource_des *)(resource_table[l]);

  lev->nlocked[p] = 0;
  lev->priority[p] = model->priority;
}
 
/* Detach task p from the level: a task must not die while it still
 * owns PC mutexes (XMUTEX_OWNER_KILLED is raised in that case);
 * its PC priority is reset to the default MAX_DWORD.
 */
static void PC_res_detach(RLEVEL l, PID p)
{
PC_mutex_resource_des *m = (PC_mutex_resource_des *)(resource_table[l]);

if (m->nlocked[p])
kern_raise(XMUTEX_OWNER_KILLED, p);
else
m->nlocked[p] = 0; /* NOTE(review): nlocked[p] is already 0 in this
                      branch — assignment looks redundant */

m->priority[p] = MAX_DWORD;
}
 
/* Accept only PC mutex attributes (possibly bound to this level).
   Returns 0 when accepted, -1 otherwise. */
static int PC_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a)
{
  return (a->mclass == PC_MCLASS || a->mclass == (PC_MCLASS | l)) ? 0 : -1;
}
 
/* Create a PC mutex: allocate the protocol-private descriptor and
 * store the ceiling taken from the attribute.
 * Returns 0 on success, ENOMEM if the allocation fails.
 * No control is done on init of a non-destroyed mutex.
 */
static int PC_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  PC_mutex_t *p;

  p = (PC_mutex_t *) kern_alloc(sizeof(PC_mutex_t));
  if (!p)
    return (ENOMEM);

  p->owner = NIL;
  p->nblocked = 0;
  p->firstblocked = NIL;

  p->ceiling = ((PC_mutexattr_t *)a)->ceiling;
  p->next = 0;
  p->prev = 0;   /* BUGFIX: prev was left uninitialized; keep the
                    descriptor fully initialized so it never carries a
                    wild pointer */

  m->mutexlevel = l;
  m->opt = (void *)p;

  return 0;
}
 
 
/* Destroy a PC mutex: the private descriptor is freed.
   Returns EBUSY if tasks are still blocked on it, 0 otherwise. */
static int PC_destroy(RLEVEL l, mutex_t *m)
{
  if (((PC_mutex_t *)m->opt)->nblocked)
    return (EBUSY);

  kern_cli();
  if (m->opt) {
    kern_free(m->opt, sizeof(PC_mutex_t));
    m->opt = NULL;
  }
  kern_sti();

  return 0;
}
 
/* Lock a PC mutex (see pi.c for informations on the blocking
 * algorithm). While PC_accept() denies the lock, the task "blocks"
 * through the shadow mechanism on the busy mutex with the highest
 * ceiling (pointed by lev->mlist).
 * Returns 0 on success, EINVAL on an uninitialized mutex or on a
 * ceiling lower than the caller's PC priority (POSIX p. 258),
 * EDEADLK if the caller already owns the mutex.
 */
static int PC_lock(RLEVEL l, mutex_t *m)
{
  PC_mutex_resource_des *lev = (PC_mutex_resource_des *)(resource_table[l]);
  PC_mutex_t *p;

  kern_cli();

  p = (PC_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, return an error!
       BUGFIX: re-enable interrupts before returning */
    kern_sti();
    return (EINVAL);
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_sti();
    return (EDEADLK);
  }

  if (p->ceiling > lev->priority[exec_shadow]) {
    /* see POSIX standard p. 258.
       BUGFIX: interrupts were left disabled on this return path too */
    kern_sti();
    return (EINVAL);
  }

  while (!PC_accept(lev, lev->priority[exec_shadow])) {
    /* the mutex is locked by someone, or another mutex with greater
       ceiling is busy: "block" the task on the busy mutex with the
       highest ceiling (pointed by lev->mlist)... */
    proc_table[exec_shadow].shadow = lev->mlist->owner;
    lev->blocked[exec_shadow] = lev->mlist->firstblocked;
    lev->mlist->firstblocked = exec_shadow;
    lev->mlist->nblocked++;

    /* ... call the scheduler... */
    scheduler();
    kern_context_load(proc_table[exec_shadow].context);

    /* ... and reaquire the cli() before the test... */
    kern_cli();
  }

  /* the mutex is free, we can lock it! */
  lev->nlocked[exec_shadow]++;
  p->owner = exec_shadow;
  PC_insert(lev, p);

  kern_sti();

  return 0;
}
 
/* Try to lock a PC mutex without blocking.
 * Returns 0 on success, EBUSY if the protocol denies the lock now,
 * EINVAL on an uninitialized mutex or on a ceiling lower than the
 * caller's PC priority, EDEADLK if the caller already owns it.
 */
static int PC_trylock(RLEVEL l, mutex_t *m)
{
  PC_mutex_resource_des *lev = (PC_mutex_resource_des *)(resource_table[l]);
  PC_mutex_t *p;

  kern_cli();

  p = (PC_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, return an error!
       BUGFIX: re-enable interrupts before returning */
    kern_sti();
    return (EINVAL);
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_sti();
    return (EDEADLK);
  }

  if (p->ceiling > lev->priority[exec_shadow]) {
    /* see POSIX standard p. 258.
       BUGFIX: the test used `<', inverted with respect to the same
       check in PC_lock(); interrupts were also left disabled here */
    kern_sti();
    return (EINVAL);
  }

  if (!PC_accept(lev, lev->priority[exec_shadow])) {
    /* a task already owns the mutex (the original used a `while' that
       could run at most once; an `if' states the intent) */
    kern_sti();
    return (EBUSY);
  }

  /* the mutex is free, we can lock it! */
  lev->nlocked[exec_shadow]++;
  p->owner = exec_shadow;
  PC_insert(lev, p);

  kern_sti();

  return 0;
}
 
/* Unlock a PC mutex owned by the running task: every task blocked on
 * it is woken up (its shadow is reset to itself), the mutex leaves the
 * level's busy list and the scheduler runs.
 * Returns 0 on success, EINVAL on an uninitialized mutex, EPERM if the
 * caller is not the owner.
 * NOTE(review): the EPERM path calls kern_sti() although no kern_cli()
 * is issued in this function — verify the expected interrupt state of
 * the callers.
 */
static int PC_unlock(RLEVEL l, mutex_t *m)
{
PC_mutex_resource_des *lev;
PC_mutex_t *p;
int i, j;

p = (PC_mutex_t *)m->opt;
if (!p)
return (EINVAL);

if (p->owner != exec_shadow) {
/* the mutex is owned by another task!!! */
kern_sti();
return (EPERM);
}

proc_table[exec_shadow].context = kern_context_save();

/* the mutex is mine */
lev = (PC_mutex_resource_des *)(resource_table[l]);
lev->nlocked[exec_shadow]--;

p->owner = NIL;

/* we unblock all the waiting tasks... */
i = p->firstblocked;
p->firstblocked = NIL;

/* walk the blocked list: each task becomes its own shadow again */
while (i != NIL) {
proc_table[i].shadow = j = i;
i = lev->blocked[i];
lev->blocked[j] = NIL;
}
p->nblocked = 0;

PC_extract(lev, p);

scheduler();
kern_context_load(proc_table[exec_shadow].context);

return 0;
}
 
void PC_register_module(void)
{
RLEVEL l; /* the level that we register */
PC_mutex_resource_des *m; /* for readableness only */
PID i; /* a counter */
 
printk("PC_register_module\n");
 
/* request an entry in the level_table */
l = resource_alloc_descriptor();
 
/* alloc the space needed for the EDF_level_des */
m = (PC_mutex_resource_des *)kern_alloc(sizeof(PC_mutex_resource_des));
 
/* update the level_table with the new entry */
resource_table[l] = (resource_des *)m;
 
/* fill the resource_des descriptor */
strncpy(m->m.r.res_name, PC_MODULENAME, MAX_MODULENAME);
m->m.r.res_code = PC_MODULE_CODE;
m->m.r.res_version = PC_MODULE_VERSION;
 
m->m.r.rtype = MUTEX_RTYPE;
 
m->m.r.resource_status = PC_resource_status;
m->m.r.level_accept_resource_model = PC_level_accept_resource_model;
m->m.r.res_register = PC_res_register;
 
m->m.r.res_detach = PC_res_detach;
 
/* fill the mutex_resource_des descriptor */
m->m.level_accept_mutexattr = PC_level_accept_mutexattr;
m->m.init = PC_init;
m->m.destroy = PC_destroy;
m->m.lock = PC_lock;
m->m.trylock = PC_trylock;
m->m.unlock = PC_unlock;
 
/* fill the PC_mutex_resource_des descriptor */
for (i=0; i<MAX_PROC; i++)
m->nlocked[i] = 0, m->priority[i] = MAX_DWORD, m->blocked[i] = NIL;
 
m->mlist = NULL;
 
}
 
/*+ This function gets the ceiling of a PC mutex, and it have to be called
    only by a task that owns the mutex.
    Returns -1 if the mutex is not a PC mutex or ceiling is NULL,
    0 otherwise +*/
int PC_get_mutex_ceiling(const mutex_t *mutex, DWORD *ceiling)
{
  resource_des *r;

  if (!mutex || !ceiling)
    return -1;

  /* the mutex must belong to this module */
  r = resource_table[mutex->mutexlevel];
  if (r->rtype != MUTEX_RTYPE ||
      r->res_code != PC_MODULE_CODE ||
      r->res_version != PC_MODULE_VERSION)
    return -1;

  *ceiling = ((PC_mutex_t *)mutex->opt)->ceiling;
  return 0;
}
 
/*+ This function sets the ceiling of a PC mutex, and it have to be called
    only by a task that owns the mutex; the previous ceiling is stored
    in *old_ceiling when the pointer is not NULL.
    Returns -1 if the mutex is not a PC mutex, 0 otherwise +*/
int PC_set_mutex_ceiling(mutex_t *mutex, DWORD ceiling, DWORD *old_ceiling)
{
  PC_mutex_t *p;
  resource_des *r;

  if (!mutex)
    return -1;

  /* the mutex must belong to this module */
  r = resource_table[mutex->mutexlevel];
  if (r->rtype != MUTEX_RTYPE ||
      r->res_code != PC_MODULE_CODE ||
      r->res_version != PC_MODULE_VERSION)
    return -1;

  p = (PC_mutex_t *)mutex->opt;
  if (old_ceiling)
    *old_ceiling = p->ceiling;
  p->ceiling = ceiling;

  return 0;
}
 
/* Force the PC priority recorded for task p at resource level r. */
void PC_set_task_ceiling(RLEVEL r, PID p, DWORD priority)
{
  PC_mutex_resource_des *lev = (PC_mutex_resource_des *)(resource_table[r]);

  lev->priority[p] = priority;
}
 
/shark/tags/rel_0_2/kernel/modules/bd_edf.c
0,0 → 1,133
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/*
* Copyright (C) 2000 Massimiliano Giorgi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
/*
* CVS : $Id: bd_edf.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
*
* File: $File$
* Revision: $Revision: 1.1.1.1 $
* Last update: $Date: 2002-03-29 14:12:52 $
*/
 
#include <modules/bd_edf.h>
 
#include <ll/ll.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <kernel/const.h>
#include <modules/codes.h>
#include <sys/types.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <kernel/assert.h>
 
/* resource level assigned at registration time;
   -1 until BD_EDF_register_module() is called */
static int mylevel=-1;

/* Level descriptor: the generic resource interface plus the relative
   deadline declared by each task (MAX_TIME = no deadline declared). */
typedef struct TAGbd_edf_resource_des
{
resource_des rd;
TIME dl[MAX_PROC];
} bd_edf_resource_des;
 
/* Accept only BDEDF resource models (possibly bound to this level).
   Returns 0 when accepted, -1 otherwise. */
static int res_level_accept_resource_model(RLEVEL l, RES_MODEL *r)
{
  assertk(mylevel == l);
  return (r->rclass == BDEDF_RCLASS || r->rclass == (BDEDF_RCLASS | l)) ? 0 : -1;
}
 
/* Record the relative block-device deadline declared by task p. */
static void res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  BDEDF_RES_MODEL *model = (BDEDF_RES_MODEL *)r;
  bd_edf_resource_des *des = (bd_edf_resource_des *)(resource_table[l]);

  assertk(mylevel == l);
  des->dl[p] = model->dl;
}
 
/* Forget the deadline of task p when it detaches from the level. */
static void res_detach(RLEVEL l, PID p)
{
  bd_edf_resource_des *des = (bd_edf_resource_des *)(resource_table[l]);

  assertk(mylevel == l);
  des->dl[p] = 0;
}
 
/* Status-dump callback: nothing to report for this module.
   NOTE(review): declared (void) while PC_resource_status in pc.c takes
   a RLEVEL — verify against the resource_des interface declaration. */
static void res_resource_status(void)
{}
 
void BD_EDF_register_module(void)
{
RLEVEL l;
bd_edf_resource_des *m;
int i;
/* request an entry in the level_table */
l=resource_alloc_descriptor();
 
/* alloc the space needed for the EDF_level_des */
m=(bd_edf_resource_des*)kern_alloc(sizeof(bd_edf_resource_des));
 
/* update the level_table with the new entry */
resource_table[l]=(resource_des*)m;
 
/* fill the resource_des descriptor */
strcpy(m->rd.res_name,BDEDF_MODULENAME);
m->rd.res_code=BDEDF_MODULE_CODE;
m->rd.res_version=BDEDF_MODULE_VERSION;
m->rd.rtype=DEFAULT_RTYPE;
m->rd.resource_status=res_resource_status;
m->rd.level_accept_resource_model=res_level_accept_resource_model;
m->rd.res_register=res_register;
m->rd.res_detach=res_detach;
 
for (i=0;i<MAX_PROC;i++) m->dl[i]=MAX_TIME;
assertk(mylevel==-1);
mylevel=l;
}
 
/* Return the absolute block-device deadline of the running task: its
   declared relative deadline plus the current time. MAX_TIME is
   returned when the module is not registered or the task declared no
   deadline. */
TIME bd_edf_getdl(void)
{
  bd_edf_resource_des *des;
  TIME rel;

  if (mylevel == -1)
    return MAX_TIME;

  des = (bd_edf_resource_des *)(resource_table[mylevel]);
  rel = des->dl[exec_shadow];
  if (rel == MAX_TIME)
    return MAX_TIME;

  return rel + sys_gettime(NULL);
}
/shark/tags/rel_0_2/kernel/modules/hartport.his
0,0 → 1,12
1.2
32 bit testing!! We use check_addr() to validate the buffers...
This is a very dirty bug-avoider :-)
Using 16 bits, check_addr() is a dummy function.
 
1.3
System calls have been modified, slightly changing their
semantics.
1.4
Exception handling has been integrated!
 
/shark/tags/rel_0_2/kernel/modules/trcdummy.c
0,0 → 1,66
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
#include <ll/sys/types.h>
#include <ll/stdlib.h>
 
#include <kernel/log.h>
 
#include <trace/types.h>
#include <trace/trace.h>
#include <trace/queues.h>
 
/* "get" callback of the dummy queue: always hand back the same static
   event slot; whatever is written there is simply discarded. */
static trc_event_t *dummy_queue_get(void *unused)
{
  static trc_event_t sink;

  return &sink;
}
 
/* "post" callback of the dummy queue: the event is dropped; always 0. */
static int dummy_queue_post(void *unused)
{
  return 0;
}
 
/* Build a dummy queue: install the no-op callbacks, no private data. */
static int create_dummy_queue(trc_queue_t *queue, void *unused)
{
  queue->data = NULL;
  queue->get  = dummy_queue_get;
  queue->post = dummy_queue_post;
  return 0;
}
 
/* Nothing to tear down for a dummy queue; always succeeds. */
static int terminate_dummy_queue(void *unused)
{
  return 0;
}
 
/* Nothing to activate for a dummy queue; always succeeds. */
static int activate_dummy_queue(void *unused, int unused2)
{
  return 0;
}
 
/* Register the TRC_DUMMY_QUEUE type with the tracer; a warning is
   logged on failure. Returns the trc_register_queuetype() result. */
int trc_register_dummy_queue(void)
{
  int err;

  err = trc_register_queuetype(TRC_DUMMY_QUEUE,
                               create_dummy_queue,
                               activate_dummy_queue,
                               terminate_dummy_queue);
  if (err != 0)
    printk(KERN_WARNING "can't register tracer dummy queue");

  return err;
}
/shark/tags/rel_0_2/kernel/modules/pi.c
0,0 → 1,346
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: pi.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
------------
 
Priority Inhertitance protocol. see pi.h for more details...
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/pi.h>
 
#include <ll/ll.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <modules/codes.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/* The PI resource level descriptor */
typedef struct {
mutex_resource_des m; /*+ the mutex interface +*/
 
int nlocked[MAX_PROC]; /*+ how many mutex a task currently locks +*/
 
PID blocked[MAX_PROC]; /*+ blocked queue ... +*/
} PI_mutex_resource_des;
 
 
/* this is the structure normally pointed by the opt field in the
mutex_t structure */
typedef struct {
PID owner;
int nblocked;
PID firstblocked;
} PI_mutex_t;
 
 
 
/*+ dump, for every task, how many PI mutexes it currently owns +*/
static void PI_resource_status(RLEVEL r)
{
  PI_mutex_resource_des *lev = (PI_mutex_resource_des *)(resource_table[r]);
  PID pid;

  kern_printf("Resources owned by the tasks:\n");
  for (pid = 0; pid < MAX_PROC; pid++)
    kern_printf("%-4d", lev->nlocked[pid]);
}
 
 
/* Priority inheritance needs no per-task resource parameters, so no
   RES_MODEL is ever accepted at this level. */
static int PI_level_accept_resource_model(RLEVEL l, RES_MODEL *r)
{
  return -1;
}
 
/* No resource model is ever accepted at this level, hence this
   callback can never be invoked. */
static void PI_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
}
 
/* Called when task p dies: if it still holds any PI mutex, raise
   XMUTEX_OWNER_KILLED to signal the broken critical section. */
static void PI_res_detach(RLEVEL l, PID p)
{
  PI_mutex_resource_des *lev = (PI_mutex_resource_des *)(resource_table[l]);

  if (lev->nlocked[p] != 0)
    kern_raise(XMUTEX_OWNER_KILLED, p);
}
 
/* A mutex attribute belongs to us when its class is PI_MCLASS,
   either plain or tagged with this level number. */
static int PI_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a)
{
  return (a->mclass == PI_MCLASS || a->mclass == (PI_MCLASS | l)) ? 0 : -1;
}
 
/* Initialize a PI mutex: allocate the per-mutex bookkeeping
   (owner + blocked list head) and hook it into m->opt.
   Returns ENOMEM when the kernel allocator fails; no check is made
   against re-initializing a non-destroyed mutex. */
static int PI_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  PI_mutex_t *desc;

  desc = (PI_mutex_t *) kern_alloc(sizeof(PI_mutex_t));
  if (desc == NULL)
    return (ENOMEM);

  desc->owner        = NIL;
  desc->nblocked     = 0;
  desc->firstblocked = NIL;

  m->mutexlevel = l;
  m->opt        = (void *)desc;

  return 0;
}
 
 
/* Destroy a PI mutex: free its descriptor and detach it from the
   mutex object. Returns EBUSY while any task is still blocked on
   it; destroying an uninitialized/already-destroyed mutex is a
   harmless no-op. */
static int PI_destroy(RLEVEL l, mutex_t *m)
{
  PI_mutex_t *p = (PI_mutex_t *)m->opt;

  /* BUG FIX: the original dereferenced m->opt (reading ->nblocked)
     BEFORE checking it for NULL, crashing on a mutex that was never
     initialized or was already destroyed. */
  if (!p)
    return 0;

  if (p->nblocked)
    return (EBUSY);

  kern_cli();
  if (m->opt) {
    kern_free(m->opt,sizeof(PI_mutex_t));
    m->opt = NULL;
  }
  kern_sti();

  return 0;
}
 
/* Note that in this approach, when unlocking we can't wake up only
one thread, but we have to wake up all the blocked threads, because there
is not a concept of priority between the task... Each woken thread have
to retest he condition.
Normally, they retest it only one time, because if many threads are
unblocked, they are scheduled basing on their priority (unkown in this
module!)... and if the slice is greather than the critical sections,
they never block!
*/
/* Lock a PI mutex.
   If the mutex is free it is taken immediately; otherwise the
   caller "blocks" by pointing its shadow at the owner (this is the
   priority inheritance) and queuing itself on the mutex's blocked
   list, then re-tests the owner after every wakeup.
   Returns EDEADLK if the caller already owns the mutex, ENOMEM if
   a lazy initialization fails, 0 on success. */
static int PI_lock(RLEVEL l, mutex_t *m)
{
  PI_mutex_resource_des *lev = (PI_mutex_resource_des *)(resource_table[l]);
  PI_mutex_t *p;

  kern_cli();

  p = (PI_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    PI_mutexattr_t a;
    PI_mutexattr_default(a);
    PI_init(l, m, &a);

    /* BUG FIX: reload the descriptor created by PI_init(); the
       original kept using the stale NULL pointer and dereferenced
       it below. */
    p = (PI_mutex_t *)m->opt;
    if (!p) {
      /* PI_init() could not allocate the descriptor */
      kern_sti();
      return (ENOMEM);
    }
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_sti();
    return (EDEADLK);
  }

  while (p->owner != NIL) {
    /* the mutex is locked by someone: "block" the task by shadowing
       the owner and pushing ourselves on the blocked list ... */
    proc_table[exec_shadow].shadow = p->owner;
    lev->blocked[exec_shadow] = p->firstblocked;
    p->firstblocked = exec_shadow;
    p->nblocked++;

    /* ... call the scheduler ... */
    scheduler();
    kern_context_load(proc_table[exec_shadow].context);

    /* ... and reacquire the cli() before re-testing the owner */
    kern_cli();
  }

  /* the mutex is free, we can lock it! */
  lev->nlocked[exec_shadow]++;
  p->owner = exec_shadow;

  kern_sti();

  return 0;
}
 
/* Non-blocking lock of a PI mutex: take it if free, otherwise
   return EBUSY immediately. ENOMEM if a lazy initialization fails.
   (Note: no EDEADLK check here — an owner re-trying gets EBUSY.) */
static int PI_trylock(RLEVEL l, mutex_t *m)
{
  PI_mutex_t *p;

  kern_cli();

  p = (PI_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    PI_mutexattr_t a;
    PI_mutexattr_default(a);
    PI_init(l, m, &a);

    /* BUG FIX: reload m->opt after the lazy initialization; the
       original kept the stale NULL and dereferenced it below. */
    p = (PI_mutex_t *)m->opt;
    if (!p) {
      kern_sti();
      return (ENOMEM);
    }
  }

  if (p->owner != NIL) {
    /* a task already owns the mutex */
    kern_sti();
    return (EBUSY);
  }
  else {
    /* the mutex is free */
    PI_mutex_resource_des *lev = (PI_mutex_resource_des *)(resource_table[l]);
    lev->nlocked[exec_shadow]++;

    p->owner = exec_shadow;

    kern_sti();
    return 0;
  }
}
 
/* Release a PI mutex owned by the running task and wake ALL the
   tasks blocked on it (see the note above: PI has no notion of
   priority here, so every waiter is released and re-tests the lock
   inside PI_lock).
   Returns EINVAL for an uninitialized mutex, EPERM when the caller
   is not the owner, 0 on success. */
static int PI_unlock(RLEVEL l, mutex_t *m)
{
PI_mutex_resource_des *lev;
PI_mutex_t *p;
int i, j;
// return 0;
p = (PI_mutex_t *)m->opt;
if (!p)
return (EINVAL);

if (p->owner != exec_shadow) {
/* the mutex is owned by another task!!! */
/* NOTE(review): this path calls kern_sti() although no kern_cli()
   appears earlier in this function — presumably callers enter with
   interrupts disabled; confirm against the generic mutex layer. */
kern_sti();
return (EPERM);
}

proc_table[exec_shadow].context = kern_context_save();

/* the mutex is mine: release it */
lev = (PI_mutex_resource_des *)(resource_table[l]);
lev->nlocked[exec_shadow]--;

p->owner = NIL;

/* we unblock all the waiting tasks: each one gets its shadow reset
   to itself (undoing the priority inheritance set in PI_lock) and
   is unlinked from the mutex's blocked list */
i = p->firstblocked;
p->firstblocked = NIL;

while (i != NIL) {
// kern_printf("<<%d>>", i);
proc_table[i].shadow = j = i;
i = lev->blocked[i];
lev->blocked[j] = NIL;
}
p->nblocked = 0;

/* {
int xxx;
kern_printf("(PI_unlock owner=%d ",p->owner);
for (xxx = 0; xxx<5; xxx++) kern_printf("p%d s%d|",xxx, proc_table[xxx].shadow);
kern_printf(")\n");
}*/

/* reschedule: one of the woken tasks may run now */
scheduler();
kern_context_load(proc_table[exec_shadow].context);

return 0;
}
 
/* Create and register the Priority Inheritance module: allocate a
   resource level descriptor, wire up the generic resource and the
   mutex interfaces, and reset the per-task bookkeeping. */
void PI_register_module(void)
{
  RLEVEL lev;                  /* the level we register */
  PI_mutex_resource_des *des;  /* for readability only */
  PID pid;

  printk("PI_register_module\n");

  /* reserve a slot in the resource level table */
  lev = resource_alloc_descriptor();

  /* allocate and install the PI level descriptor */
  des = (PI_mutex_resource_des *)kern_alloc(sizeof(PI_mutex_resource_des));
  resource_table[lev] = (resource_des *)des;

  /* generic resource interface */
  strncpy(des->m.r.res_name, PI_MODULENAME, MAX_MODULENAME);
  des->m.r.res_code    = PI_MODULE_CODE;
  des->m.r.res_version = PI_MODULE_VERSION;
  des->m.r.rtype       = MUTEX_RTYPE;
  des->m.r.resource_status             = PI_resource_status;
  des->m.r.level_accept_resource_model = PI_level_accept_resource_model;
  des->m.r.res_register = PI_res_register;
  des->m.r.res_detach   = PI_res_detach;

  /* mutex interface */
  des->m.level_accept_mutexattr = PI_level_accept_mutexattr;
  des->m.init    = PI_init;
  des->m.destroy = PI_destroy;
  des->m.lock    = PI_lock;
  des->m.trylock = PI_trylock;
  des->m.unlock  = PI_unlock;

  /* PI-specific state: no mutex owned, nobody blocked */
  for (pid = 0; pid < MAX_PROC; pid++) {
    des->nlocked[pid] = 0;
    des->blocked[pid] = NIL;
  }
}
 
/shark/tags/rel_0_2/kernel/modules/bd_pscan.c
0,0 → 1,135
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/*
* Copyright (C) 2000 Massimiliano Giorgi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
/*
* CVS : $Id: bd_pscan.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
*
* File: $File$
* Revision: $Revision: 1.1.1.1 $
* Last update: $Date: 2002-03-29 14:12:52 $
*/
 
#include <modules/bd_pscan.h>
 
#include <ll/ll.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <kernel/const.h>
#include <modules/codes.h>
#include <sys/types.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <kernel/assert.h>
 
#define LOWESTPRIORITY 255
#define HIGHERPRIORITY 0
 
static int mylevel=-1;
 
typedef struct TAGbd_pscan_resource_des
{
resource_des rd;
int priority[MAX_PROC];
} bd_pscan_resource_des;
 
/* Accept only BDPSCAN resource models (class tag possibly OR-ed
   with our level number). */
static int res_level_accept_resource_model(RLEVEL l, RES_MODEL *r)
{
  assertk(mylevel==l);
  return (r->rclass==BDPSCAN_RCLASS || r->rclass==(BDPSCAN_RCLASS|l)) ? 0 : -1;
}
 
/* Record the disk-scheduling priority declared by task p. */
static void res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  bd_pscan_resource_des *des=(bd_pscan_resource_des*)(resource_table[l]);
  BDPSCAN_RES_MODEL *model=(BDPSCAN_RES_MODEL*)r;

  assertk(mylevel==l);
  des->priority[p]=model->priority;
}
 
/* Task p is gone: fall back to the lowest disk priority. */
static void res_detach(RLEVEL l, PID p)
{
  bd_pscan_resource_des *des=(bd_pscan_resource_des*)(resource_table[l]);

  assertk(mylevel==l);
  des->priority[p]=LOWESTPRIORITY;
}
 
/* Status dump callback: this module keeps nothing worth printing. */
static void res_resource_status(void)
{
}
 
void BD_PSCAN_register_module(void)
{
RLEVEL l;
bd_pscan_resource_des *m;
int i;
/* request an entry in the level_table */
l=resource_alloc_descriptor();
 
/* alloc the space needed for the EDF_level_des */
m=(bd_pscan_resource_des*)kern_alloc(sizeof(bd_pscan_resource_des));
 
/* update the level_table with the new entry */
resource_table[l]=(resource_des*)m;
 
/* fill the resource_des descriptor */
strcpy(m->rd.res_name,BDPSCAN_MODULENAME);
m->rd.res_code=BDPSCAN_MODULE_CODE;
m->rd.res_version=BDPSCAN_MODULE_VERSION;
m->rd.rtype=DEFAULT_RTYPE;
m->rd.resource_status=res_resource_status;
m->rd.level_accept_resource_model=res_level_accept_resource_model;
m->rd.res_register=res_register;
m->rd.res_detach=res_detach;
 
for (i=0;i<MAX_PROC;i++) m->priority[i]=LOWESTPRIORITY;
assertk(mylevel==-1);
mylevel=l;
}
 
/* Return the disk priority of the running task, or LOWESTPRIORITY
   when the module was never registered. */
int bd_pscan_getpriority(void)
{
  bd_pscan_resource_des *des;

  if (mylevel==-1)
    return LOWESTPRIORITY;

  des=(bd_pscan_resource_des*)(resource_table[mylevel]);
  return des->priority[exec_shadow];
}
/shark/tags/rel_0_2/kernel/modules/trccirc.c
0,0 → 1,393
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/*
* Copyright (C) 2000 Massimiliano Giorgi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
/*
* CVS : $Id: trccirc.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
*
* File: $File$
* Revision: $Revision: 1.1.1.1 $
* Last update: $Date: 2002-03-29 14:12:52 $
*/
 
#include <ll/sys/types.h>
#include <ll/stdlib.h>
 
#include <kernel/func.h>
#include <kernel/mem.h>
#include <kernel/log.h>
 
#include <trace/types.h>
#include <trace/trace.h>
#include <trace/queues.h>
 
#include <fs/fs.h>
 
#include <unistd.h>
#include <fcntl.h>
#include <limits.h>
 
/* Well... this file is very similar to trcfixed.c! */
 
/* Per-queue state of a circular trace queue. The struct is allocated
   in one block together with its trailing event table. */
typedef struct TAGcircular_queue_t {
/*+ total number of event slots in table[] +*/
int size;
/*+ index of the slot that receives the next event +*/
int index;
/*+ index of the next slot the online writer task will flush
(meaningful only when the online tracer is activated) +*/
int windex;
/*+ number of events lost because the writer could not keep up
(counted only when the online tracer is activated) +*/
long hoops;
/*+ trace file name (NULL: a name is built from the uniq number) +*/
char *filename;
/*+ flags copied from the flags field of the initialization struct +*/
int flags;
/*+ private hook: holds the deferred writer-task arguments between
circular_create() and circular_activate() +*/
void *dummy;
/*+ unique number that identifies the queue +*/
int uniq;
/*+ event storage; the [0]-sized trailing array is the pre-C99
idiom for a flexible array member (extra space is allocated
right past the struct) +*/
trc_event_t table[0];
} circular_queue_t;
 
static int mustgodown=0;
 
/* Periodic system task that flushes a circular trace queue to disk
   while the system runs (created by circular_activate() when
   TRC_CIRCULAR_ONLINETASK is set). It builds the trace file name,
   waits for the filesystem, then on every period writes the events
   accumulated between windex and index; it exits when the global
   mustgodown flag is raised. */
static TASK online_tracer(circular_queue_t *queue)
{
char pathname[PATH_MAX];
int handle;
int index;

/* output name: either the user-supplied one or "cir"+uniq number */
if (queue->filename==NULL) trc_create_name("cir",queue->uniq,pathname);
else trc_create_name(queue->filename,0,pathname);

if (wait_for_fs_initialization()) {
printk(KERN_NOTICE "tracer online task not running");
goto BADEND;
}
handle=open(pathname,O_CREAT|O_TRUNC|O_WRONLY);
if (handle==-1) {
printk(KERN_NOTICE "tracer file %s not created!",pathname);
goto BADEND;
}
for (;;) {
/* drain everything logged since the last flush; snapshot index
   since the logger may keep inserting while we write */
while (queue->index!=queue->windex) {
index=queue->index;
/* the logger wrapped around: flush up to the end of the table
   first, then restart from slot 0 */
if (index<queue->windex) {
write(handle,
queue->table+queue->windex,
(queue->size-queue->windex)*sizeof(trc_event_t)
);
queue->windex=0;
continue;
}
write(handle,
queue->table+queue->windex,
(index-queue->windex)*sizeof(trc_event_t)
);
queue->windex=index;
}
if (mustgodown) break;
task_endcycle();
}
close(handle);
printk(KERN_NOTICE "tracer file %s created!",pathname);

if (queue->hoops)
printk(KERN_NOTICE "tracer: %li event lost into %s",queue->hoops,pathname);

resume_fs_shutdown();
return NULL;

BADEND:
/* on failure the task cannot simply die: it idles until shutdown
   (see the comment below about task_makefree()) */
resume_fs_shutdown();
/*Why this? for a little bug on the task_makefree() routine */
for (;;) {
if (mustgodown) break;
task_endcycle();
}
return NULL;
}
 
/* Event-slot allocator used when the online writer task is active:
   circular-buffer behaviour, but it refuses to overtake the write
   index (windex). A refused event is counted in hoops and NULL is
   returned: the event is lost rather than overwriting data not yet
   flushed to disk. */
static trc_event_t *circular_get2(circular_queue_t *queue)
{
/* wrap-around case: the last slot can be used only if the writer
   is not still waiting to flush slot 0 */
if (queue->index==queue->size-1) {
if (queue->windex==0) {
queue->hoops++;
return NULL;
}
queue->index=0;
return &queue->table[queue->size-1];
}
/* normal case: full when the next slot would collide with the
   writer's position */
if (queue->index+1==queue->windex) {
queue->hoops++;
return NULL;
}
return &queue->table[queue->index++];
}
 
/* Event-slot allocator used when no online writer task exists:
 * purely circular, old events are silently overwritten.
 * Returns the current slot and advances the write index (with
 * wrap-around at the end of the table). */
static trc_event_t *circular_get1(circular_queue_t *queue)
{
  int at = queue->index;

  if (at == queue->size-1)
    queue->index = 0;
  else
    queue->index = at+1;

  return &queue->table[at];
}
 
/* "post" callback: nothing to do once an event slot is filled in. */
static int circular_post(circular_queue_t *queue)
{
  return 0;
}
 
/* Parameters saved by circular_create() and consumed later by
   circular_activate() to build the online writer task. */
struct create_args {
long period; /* task period, passed to soft_task_def_period() */
long slice;  /* budget, passed to soft_task_def_met()/_wcet() */
circular_queue_t *queue; /* the queue the writer task must flush */
};
 
 
/* to remove!!! */
/*
static void circular_create_postponed(void *foo)
{
struct create_args *ptr=(struct create_args *)foo;
SOFT_TASK_MODEL model;
PID pid;
 
printk(KERN_DEBUG "postponed create: START");
soft_task_default_model(model);
soft_task_def_system(model);
soft_task_def_notrace(model);
soft_task_def_periodic(model);
soft_task_def_period(model,ptr->period);
soft_task_def_met(model,ptr->slice);
soft_task_def_wcet(model,ptr->slice);
soft_task_def_arg(model,ptr->queue);
 
printk(KERN_DEBUG "postponed create: A");
kern_free(foo,sizeof(struct create_args));
 
printk(KERN_DEBUG "postponed create: B");
pid=task_create("trcCirc",online_tracer,&model,NULL);
if (pid==-1) {
printk(KERN_ERR "can't start tracer online circular trace task");
} else {
printk(KERN_DEBUG "postponed create: C1");
task_activate(pid);
printk(KERN_DEBUG "postponed create: C2");
suspend_fs_shutdown();
printk(KERN_DEBUG "postponed create: C3");
}
 
printk(KERN_DEBUG "postponed create: END");
}
*/
 
/* "create" callback of the circular queue type: allocate the queue
 * (header + event table in a single block), install the get/post
 * handlers and, when TRC_CIRCULAR_ONLINETASK is requested, stash
 * the writer-task parameters for circular_activate().
 * Returns 0 on success, -1 on allocation failure. */
static int circular_create(trc_queue_t *queue, TRC_CIRCULAR_PARMS *args)
{
TRC_CIRCULAR_PARMS defaultargs;
circular_queue_t *ptr;

if (args==NULL) {
trc_circular_default_parms(defaultargs);
args=&defaultargs;
}
ptr=(circular_queue_t*)kern_alloc(sizeof(circular_queue_t)+
sizeof(trc_event_t)*args->size);
if (ptr==NULL) return -1;

queue->get=(trc_event_t*(*)(void*))circular_get1;
queue->post=(int(*)(void*))circular_post;
queue->data=ptr;
ptr->size=args->size;
ptr->windex=ptr->index=0;
ptr->hoops=0;
ptr->filename=args->filename;
ptr->flags=args->flags;
ptr->dummy=NULL;
if (args->flags&TRC_CIRCULAR_ONLINETASK) {
struct create_args *p;
p=kern_alloc(sizeof(struct create_args));
if (p==NULL) {
printk(KERN_ERR "can't create tracer online circular trace task");
/* BUG FIX: the original leaked the queue buffer here and left
   queue->data pointing at it while reporting failure */
kern_free(ptr,sizeof(circular_queue_t)+sizeof(trc_event_t)*args->size);
queue->data=NULL;
return -1;
}
/* with an online writer, use the getter that never overtakes the
   not-yet-flushed region */
queue->get=(trc_event_t*(*)(void*))circular_get2;
ptr->dummy=p;
p->period=args->period;
p->slice=args->slice;
p->queue=ptr;
}
return 0;
}
 
/* "activate" callback: record the queue's unique id and, when the
   online writer was requested at create time, actually create and
   start the online_tracer task using the parameters stashed in
   queue->dummy by circular_create() (creation is postponed until
   activation time). suspend_fs_shutdown() keeps the filesystem
   alive until the writer has flushed everything. Always returns 0,
   even if the writer task could not be created. */
static int circular_activate(circular_queue_t *queue, int uniq)
{
queue->uniq=uniq;

if (queue->flags&TRC_CIRCULAR_ONLINETASK) {
struct create_args *ptr=(struct create_args *)queue->dummy;
SOFT_TASK_MODEL model;
PID pid;

printk(KERN_DEBUG "postponed create: START");
/* periodic soft system task, excluded from tracing itself */
soft_task_default_model(model);
soft_task_def_system(model);
soft_task_def_notrace(model);
soft_task_def_periodic(model);
soft_task_def_period(model,ptr->period);
soft_task_def_met(model,ptr->slice);
soft_task_def_wcet(model,ptr->slice);
soft_task_def_arg(model,ptr->queue);

printk(KERN_DEBUG "postponed create: A");
/* the stashed arguments are no longer needed once copied into
   the task model */
kern_free(queue->dummy,sizeof(struct create_args));

printk(KERN_DEBUG "postponed create: B");
pid=task_create("trcCirc",online_tracer,&model,NULL);
if (pid==-1) {
printk(KERN_ERR "can't start tracer online circular trace task");
} else {
printk(KERN_DEBUG "postponed create: C1");
suspend_fs_shutdown();
printk(KERN_DEBUG "postponed create: C2");
task_activate(pid);
printk(KERN_DEBUG "postponed create: C3");
}

printk(KERN_DEBUG "postponed create: END");
}
return 0;
}
 
/* One-shot task that dumps a (non-online) circular queue to disk at
   system shutdown. Events are written oldest-first: from the slot
   after the current write position up to the end of the table, then
   from slot 0 up to (excluding) the write position.
   NOTE(review): slot table[index] itself is skipped — presumably it
   is the next-write slot and holds no valid event; confirm. */
static TASK circular_shutdown(circular_queue_t *queue)
{
char pathname[PATH_MAX];
int h;
/* same naming rule as the online writer */
if (queue->filename==NULL) trc_create_name("cir",queue->uniq,pathname);
else trc_create_name(queue->filename,0,pathname);
h=open(pathname,O_CREAT|O_TRUNC|O_WRONLY);
if (h!=-1) {
if (queue->index!=queue->size-1)
write(h,
queue->table+queue->index+1,
(queue->size-queue->index-1)*sizeof(trc_event_t)
);
write(h,
queue->table,
queue->index*sizeof(trc_event_t)
);
close(h);
printk(KERN_NOTICE "tracer file %s created!",pathname);
} else
printk(KERN_NOTICE "tracer file %s NOT created!",pathname);
/* the filesystem may now complete its shutdown */
resume_fs_shutdown();
return NULL;
}
 
/* "terminate" callback: raise the mustgodown flag so any online
   writer task exits and, when this queue has no online writer,
   spawn the circular_shutdown task that dumps it to disk.
   Returns 0 on success, -1 if the shutdown task cannot be created.
   NOTE(review): mustgodown is file-global, so terminating one queue
   signals the writer tasks of every circular queue. */
static int circular_terminate(circular_queue_t *queue)
{
SOFT_TASK_MODEL model;
PID pid;

mustgodown=1;
/* an online writer exists: it flushes the queue and resumes the
   filesystem shutdown by itself */
if (queue->flags&TRC_CIRCULAR_ONLINETASK) return 0;

/* keep the filesystem alive until circular_shutdown() has run */
suspend_fs_shutdown();
//nrt_task_default_model(model);
//nrt_task_def_system(model);
//nrt_task_def_arg(model,queue);

soft_task_default_model(model);
soft_task_def_system(model);
soft_task_def_notrace(model);
soft_task_def_periodic(model);
soft_task_def_period(model,50000);
soft_task_def_met(model,10000);
soft_task_def_wcet(model,10000);
soft_task_def_arg(model,queue);
pid=task_create("ShutTrcCir",circular_shutdown,&model,NULL);
if (pid==-1) {
printk(KERN_ERR "can't start tracer shutdown task (circular)");
return -1;
} else
task_activate(pid);

return 0;
}
 
/* Register the circular queue type with the tracer core.
 * Returns 0 on success; a kernel warning is logged otherwise. */
int trc_register_circular_queue(void)
{
  int err;

  err = trc_register_queuetype(TRC_CIRCULAR_QUEUE,
                               (int(*)(trc_queue_t*,void*))circular_create,
                               (int(*)(void*,int))circular_activate,
                               (int(*)(void*))circular_terminate);
  if (err != 0)
    printk(KERN_WARNING "can't register tracer circular queue");
  return err;
}
/shark/tags/rel_0_2/kernel/modules/npp.c
0,0 → 1,213
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: npp.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
------------
 
Non Preemptive Protocol. see npp.h for more details...
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/npp.h>
 
#include <ll/ll.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <modules/codes.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/* The NPP resource level descriptor */
typedef struct {
mutex_resource_des m; /*+ the mutex interface +*/
 
int nlocked; /*+ how many mutex a task currently locks +*/
} NPP_mutex_resource_des;
 
 
/*+ print how many mutexes are currently locked at this level +*/
static void NPP_resource_status(RLEVEL r)
{
  NPP_mutex_resource_des *lev = (NPP_mutex_resource_des *)(resource_table[r]);

  kern_printf("%d Resources owned by the tasks %d\n", lev->nlocked, exec_shadow);
}
 
/* NPP needs no per-task resource parameters, so no RES_MODEL is
   ever accepted at this level. */
static int NPP_level_accept_resource_model(RLEVEL l, RES_MODEL *r)
{
  return -1;
}
 
/* No resource model is ever accepted at this level, hence this
   callback can never be invoked. */
static void NPP_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
}
 
/* Called when task p dies: a non-zero lock count means a mutex is
   still held somewhere, so raise XMUTEX_OWNER_KILLED. */
static void NPP_res_detach(RLEVEL l, PID p)
{
  NPP_mutex_resource_des *lev = (NPP_mutex_resource_des *)(resource_table[l]);

  if (lev->nlocked != 0)
    kern_raise(XMUTEX_OWNER_KILLED, p);
}
 
/* A mutex attribute belongs to us when its class is NPP_MCLASS,
   either plain or tagged with this level number. */
static int NPP_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a)
{
  return (a->mclass == NPP_MCLASS || a->mclass == (NPP_MCLASS | l)) ? 0 : -1;
}
 
/* NPP needs no per-mutex allocation: m->opt simply stores the PID
   of the current owner, NIL when free. */
static int NPP_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  m->opt = (void *)NIL;
  m->mutexlevel = l;

  return 0;
}
 
 
/* Refuse to destroy a mutex that is still owned by some task
   (owner PID stored in m->opt, NIL when free). */
static int NPP_destroy(RLEVEL l, mutex_t *m)
{
  return (((PID) m->opt) != NIL) ? (EBUSY) : 0;
}
 
/* Lock an NPP mutex: the locking task is made non-preemptable
   (task_nopreempt() on its first mutex), so no other task can ever
   run and find the mutex busy — the only failure is the owner
   re-locking its own mutex (EDEADLK). */
static int NPP_lock(RLEVEL l, mutex_t *m)
{
NPP_mutex_resource_des *lev;

kern_cli();

if (((PID)m->opt) == exec_shadow) {
/* the task already owns the mutex */
kern_sti();
return (EDEADLK);
}

/* p->opt == NIL (it can't be the case of p->opt != NIL and != exec_shadow
because when a task locks a mutex it becomes non-preemptable) */

/* the mutex is free, we can lock it! */
lev = (NPP_mutex_resource_des *)(resource_table[l]);

/* disable preemption only on the first mutex acquired */
if (!lev->nlocked) task_nopreempt();
lev->nlocked++;

m->opt = (void *)exec_shadow;

kern_sti();

return 0;
}
 
// static int NPP_trylock(RLEVEL l, mutex_t *m) is a non-sense!
 
/* Release an NPP mutex and re-enable preemption when the last held
   mutex is dropped.
   NOTE(review): unlike NPP_lock there is no kern_cli()/kern_sti()
   pair here — presumably safe because the owner still runs
   non-preemptively (task_nopreempt() in NPP_lock) until the final
   task_preempt() below; confirm w.r.t. interrupt handlers. Also no
   ownership check is performed (compare PI_unlock's EPERM path). */
static int NPP_unlock(RLEVEL l, mutex_t *m)
{
NPP_mutex_resource_des *lev;

/* the mutex is mine */
lev = (NPP_mutex_resource_des *)(resource_table[l]);
lev->nlocked--;

m->opt = (void *)NIL;

/* last mutex released: the task becomes preemptable again */
if (!lev->nlocked) task_preempt();

return 0;
}
 
/* Create and register the Non Preemptive Protocol module: allocate
   the level descriptor, wire up the generic resource and mutex
   interfaces, and clear the lock counter. */
void NPP_register_module(void)
{
  RLEVEL lev;                  /* the level we register */
  NPP_mutex_resource_des *des; /* for readability only */

  printk("NPP_register_module\n");

  /* reserve a slot in the resource level table */
  lev = resource_alloc_descriptor();

  /* allocate and install the NPP level descriptor */
  des = (NPP_mutex_resource_des *)kern_alloc(sizeof(NPP_mutex_resource_des));
  resource_table[lev] = (resource_des *)des;

  /* generic resource interface */
  strncpy(des->m.r.res_name, NPP_MODULENAME, MAX_MODULENAME);
  des->m.r.res_code    = NPP_MODULE_CODE;
  des->m.r.res_version = NPP_MODULE_VERSION;
  des->m.r.rtype       = MUTEX_RTYPE;
  des->m.r.resource_status             = NPP_resource_status;
  des->m.r.level_accept_resource_model = NPP_level_accept_resource_model;
  des->m.r.res_register = NPP_res_register;
  des->m.r.res_detach   = NPP_res_detach;

  /* mutex interface; NPP_lock doubles as trylock: under NPP a lock
     can never block, so locking and try-locking coincide */
  des->m.level_accept_mutexattr = NPP_level_accept_mutexattr;
  des->m.init    = NPP_init;
  des->m.destroy = NPP_destroy;
  des->m.lock    = NPP_lock;
  des->m.trylock = NPP_lock;
  des->m.unlock  = NPP_unlock;

  /* no mutex is locked at startup */
  des->nlocked = 0;
}
 
/shark/tags/rel_0_2/kernel/modules/edf.c
0,0 → 1,691
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: edf.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the scheduling module EDF (Earliest Deadline First)
 
Read edf.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/edf.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <kernel/trace.h>
 
//#define edf_printf kern_printf
#define edf_printf printk
 
/*+ Status used in the level +*/
#define EDF_READY MODULE_STATUS_BASE /*+ - Ready status +*/
#define EDF_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/
#define EDF_WAIT MODULE_STATUS_BASE+3 /*+ to wait the deadline +*/
#define EDF_IDLE MODULE_STATUS_BASE+4 /*+ to wait the deadline +*/
#define EDF_ZOMBIE MODULE_STATUS_BASE+5 /*+ to wait the free time +*/
 
/*+ flags +*/
#define EDF_FLAG_SPORADIC 1
#define EDF_FLAG_NORAISEEXC 2
 
/*+ the level redefinition for the Earliest Deadline First level +*/
typedef struct {
level_des l; /*+ the standard level descriptor +*/
 
TIME period[MAX_PROC]; /*+ The task periods; the deadlines are
stored in the priority field +*/
int deadline_timer[MAX_PROC];
/*+ The task deadline timers +*/
 
int flag[MAX_PROC];
/*+ used to manage the JOB_TASK_MODEL and the
periodicity +*/
 
IQUEUE ready; /*+ the ready queue +*/
 
int flags; /*+ the init flags... +*/
 
bandwidth_t U; /*+ the used bandwidth +*/
 
} EDF_level_des;
 
 
/* Map an EDF-private task status code to a printable name; codes
   below MODULE_STATUS_BASE are delegated to the generic
   status_to_a(). */
static char *EDF_status_to_a(WORD status)
{
  if (status < MODULE_STATUS_BASE)
    return status_to_a(status);

  switch (status) {
    case EDF_READY:         return "EDF_Ready";
    case EDF_WCET_VIOLATED: return "EDF_Wcet_Violated";
    case EDF_WAIT:          return "EDF_Sporadic_Wait";
    case EDF_IDLE:          return "EDF_Idle";
    case EDF_ZOMBIE:        return "EDF_Zombie";
    default:                return "EDF_Unknown";
  }
}
 
/* Deadline-timer handler armed for every EDF task. Behaviour
   depends on the task's status when its deadline fires:
   - EDF_ZOMBIE: the task ended and its last deadline passed; free
     the descriptor and give back its bandwidth;
   - EDF_IDLE: a periodic task waiting for its next period;
     reactivate it and re-arm this timer one period ahead;
   - EDF_WAIT: a sporadic task past its minimum interarrival time;
     put it to SLEEP so it can be activated again;
   - anything else: the task is still active at its deadline, i.e.
     a deadline miss: raise XDEADLINE_MISS. */
static void EDF_timer_deadline(void *par)
{
PID p = (PID) par;
EDF_level_des *lev;
struct timespec *temp;

edf_printf("$");

lev = (EDF_level_des *)level_table[proc_table[p].task_level];

switch (proc_table[p].status) {
case EDF_ZOMBIE:
/* put the descriptor back into the free-descriptor queue */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* and free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet;
break;

case EDF_IDLE:
/* tracer stuff */
trc_logevent(TRC_INTACTIVATION,&p);
/* similar to EDF_task_activate: set the new request time and
   deadline (old deadline + period), re-queue and re-arm */
temp = iq_query_timespec(p,&lev->ready);
TIMESPEC_ASSIGN(&proc_table[p].request_time,
temp);
ADDUSEC2TIMESPEC(lev->period[p], temp);
proc_table[p].status = EDF_READY;
iq_timespec_insert(p,&lev->ready);
lev->deadline_timer[p] = kern_event_post(temp,
EDF_timer_deadline,
(void *)p);
edf_printf("(dline p%d ev%d %d.%d)",(int)p,(int)lev->deadline_timer[p],(int)temp->tv_sec,(int)temp->tv_nsec/1000);
//printk("(d%d idle priority set to %d)",p,proc_table[p].priority );
event_need_reschedule();
printk("el%d|",p);
break;

case EDF_WAIT:
/* Without this, the task cannot be reactivated!!! */
proc_table[p].status = SLEEP;
break;

default:
/* else, a deadline miss occurred!!! */
edf_printf("\nstatus %d\n", (int)proc_table[p].status);
edf_printf("timer_deadline:AAARRRGGGHHH!!!");
kern_raise(XDEADLINE_MISS,p);
}
}
 
/* Deadline timer for guest (JOB_TASK_MODEL) tasks: a guest still
   present when its deadline fires has missed it — raise the
   exception unconditionally. */
static void EDF_timer_guest_deadline(void *par)
{
  PID pid = (PID) par;

  edf_printf("AAARRRGGGHHH!!!");
  kern_raise(XDEADLINE_MISS,pid);
}
 
/* Accept HARD task models (class possibly tagged with our level)
   that declare both a wcet and a mit; refuse everything else. */
static int EDF_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
  HARD_TASK_MODEL *h;

  if (m->pclass != HARD_PCLASS && m->pclass != (HARD_PCLASS | l))
    return -1;

  h = (HARD_TASK_MODEL *)m;
  return (h->wcet && h->mit) ? 0 : -1;
}
 
/* Guest tasks are accepted when they carry a JOB task model
   (class possibly tagged with our level). */
static int EDF_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
  return (m->pclass == JOB_PCLASS || m->pclass == (JOB_PCLASS | l)) ? 0 : -1;
}
 
 
/* Render a boolean flag as a fixed-width "On "/"Off" string. */
static char *onoff(int i)
{
  return i ? "On " : "Off";
}
 
/* Debug dump of the EDF level: configuration flags and used
   bandwidth first, then one line per task — the deadline-ordered
   ready queue is walked first, followed by every other non-free
   task belonging to this level. */
static void EDF_level_status(LEVEL l)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
PID p = iq_query_first(&lev->ready);

kern_printf("Wcet Check : %s\n",
onoff(lev->flags & EDF_ENABLE_WCET_CHECK));
kern_printf("On-line guarantee : %s\n",
onoff(lev->flags & EDF_ENABLE_GUARANTEE));
kern_printf("Used Bandwidth : %u/%u\n",
lev->U, MAX_BANDWIDTH);

/* tasks queued in the ready queue */
while (p != NIL) {
if ((proc_table[p].pclass) == JOB_PCLASS)
kern_printf("Pid: %2d (GUEST)\n", p);
else
kern_printf("Pid: %2d Name: %10s %s: %9ld Dline: %9ld.%6ld Stat: %s\n",
p,
proc_table[p].name,
lev->flag[p] & EDF_FLAG_SPORADIC ? "MinITime" : "Period  ",
lev->period[p],
iq_query_timespec(p, &lev->ready)->tv_sec,
iq_query_timespec(p, &lev->ready)->tv_nsec/1000,
EDF_status_to_a(proc_table[p].status));
p = iq_query_next(p, &lev->ready);
}

/* remaining tasks of this level (blocked, idle, zombie, ...) */
for (p=0; p<MAX_PROC; p++)
if (proc_table[p].task_level == l && proc_table[p].status != EDF_READY
&& proc_table[p].status != FREE )
kern_printf("Pid: %2d Name: %10s %s: %9ld Dline: %9ld.%6ld Stat: %s\n",
p,
proc_table[p].name,
lev->flag[p] & EDF_FLAG_SPORADIC ? "MinITime" : "Period  ",
lev->period[p],
iq_query_timespec(p, &lev->ready)->tv_sec,
iq_query_timespec(p, &lev->ready)->tv_nsec/1000,
EDF_status_to_a(proc_table[p].status));
}
 
/* EDF dispatch: the ready queue is kept deadline-ordered on
   insertion, so scheduling reduces to returning the head of the
   queue (NIL when empty). */
static PID EDF_level_scheduler(LEVEL l)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  return iq_query_first(&lev->ready);
}
 
/* On-line admission test (installed only when EDF_ENABLE_GUARANTEE was
   passed at registration).  Subtracts the level's allocated bandwidth
   lev->U from *freebandwidth.
   Returns 1 if the remaining bandwidth suffices, 0 otherwise (in which
   case *freebandwidth is zeroed when a previous create overflowed U,
   see EDF_FAILED_GUARANTEE in EDF_task_create). */
static int EDF_level_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  if (lev->flags & EDF_FAILED_GUARANTEE) {
    /* a task creation saturated U: the whole system is over-committed */
    *freebandwidth = 0;
    return 0;
  }
  else
    if (*freebandwidth >= lev->U) {
      *freebandwidth -= lev->U;
      return 1;
    }
    else
      return 0;

}
 
/* Level-specific part of task creation: records the period/mit, marks
   sporadic tasks, arms the wcet accounting, and accumulates the task's
   bandwidth into lev->U.  Always returns 0 — admission is decided later
   by level_guarantee (see the long comment below). */
static int EDF_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  /* if the EDF_task_create is called, then the pclass must be a
     valid pclass (already filtered by EDF_level_accept_task_model). */

  HARD_TASK_MODEL *h = (HARD_TASK_MODEL *)m;

  lev->period[p] = h->mit;

  if (h->periodicity == APERIODIC)
    lev->flag[p] = EDF_FLAG_SPORADIC;
  else
    lev->flag[p] = 0;
  lev->deadline_timer[p] = -1;   /* no deadline event pending yet */

  /* Enable wcet check */
  if (lev->flags & EDF_ENABLE_WCET_CHECK) {
    proc_table[p].avail_time = h->wcet;
    proc_table[p].wcet       = h->wcet;
    proc_table[p].control    |= CONTROL_CAP;
  }

  /* update the bandwidth...
     NOTE(review): (MAX_BANDWIDTH / h->mit) * h->wcet truncates the
     per-microsecond quotient before multiplying — presumably the intended
     conservative rounding; confirm against EDF_task_detach which uses the
     same formula so the accounting stays symmetric. */
  if (lev->flags & EDF_ENABLE_GUARANTEE) {
    bandwidth_t b;
    b = (MAX_BANDWIDTH / h->mit) * h->wcet;

    /* really update lev->U, checking an overflow... */
    if (MAX_BANDWIDTH - lev->U > b)
      lev->U += b;
    else
      /* The task can NOT be guaranteed (U>MAX_BANDWIDTH)...
         in this case, we don't raise an exception... in fact, after the
         EDF_task_create the task_create will call level_guarantee that return
         -1... return -1 in EDF_task_create isn't correct, because:
         . generally, the guarantee must be done when also the resources
           are registered
         . returning -1 will cause the task_create to return with an errno
           ETASK_CREATE instead of ENO_GUARANTEE!!!

         Why I use the flag??? because if the lev->U overflows, if i.e. I set
         it to MAX_BANDWIDTH, I lose the correct allocated bandwidth...
      */
      lev->flags |= EDF_FAILED_GUARANTEE;
  }

  return 0; /* OK, also if the task cannot be guaranteed... */
}
 
/* Level-specific part of task destruction: gives the task's bandwidth
   back to the level.  If a previous create had overflowed U (flagged by
   EDF_FAILED_GUARANTEE) we only clear the flag, because U was never
   actually incremented for that task. */
static void EDF_task_detach(LEVEL l, PID p)
{
  /* the EDF level doesn't introduce any dynamically allocated new field.
     we have only to reset the NO_GUARANTEE FIELD and decrement the allocated
     bandwidth */

  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  if (lev->flags & EDF_FAILED_GUARANTEE)
    lev->flags &= ~EDF_FAILED_GUARANTEE;
  else
    lev->U -= (MAX_BANDWIDTH / lev->period[p]) * proc_table[p].wcet;
}
 
/* Eligibility hook: a task picked by the EDF scheduler is always
   allowed to run, so this unconditionally reports "eligible" (0). */
static int EDF_task_eligible(LEVEL l, PID p)
{
  return 0;
}
 
/* Dispatch hook: the generic scheduler() has already set the task state
   to EXE; here we only remove it from the ready queue. */
static void EDF_task_dispatch(LEVEL l, PID p, int nostop)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  edf_printf("(disp p%d %d.%d)",(int)p,(int)schedule_time.tv_sec,(int)schedule_time.tv_nsec/1000);

  /* the task state is set EXE by the scheduler()
     we extract the task from the ready queue
     NB: we can't assume that p is the first task in the queue!!! */
  iq_extract(p, &lev->ready);
}
 
/* Epilogue hook, called when the running task is preempted or its
   capacity expires: either raise XWCET_VIOLATION (budget exhausted with
   wcet checking on) or requeue the task in deadline order. */
static void EDF_task_epilogue(LEVEL l, PID p)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  edf_printf("(epil p%d %d.%d)",p,(int)schedule_time.tv_sec,(int)schedule_time.tv_nsec/1000);

  /* check if the wcet is finished... */
  if ((lev->flags & EDF_ENABLE_WCET_CHECK) && proc_table[p].avail_time <= 0) {
    /* if it is, raise a XWCET_VIOLATION exception */
    kern_raise(XWCET_VIOLATION,p);
    proc_table[p].status = EDF_WCET_VIOLATED;
  }
  else {
    /* the task has been preempted. it returns into the ready queue... */
    iq_timespec_insert(p,&lev->ready);
    proc_table[p].status = EDF_READY;
  }
}
 
/* Activation hook: computes the absolute deadline as request_time +
   period, inserts the task in the deadline-ordered ready queue and posts
   the deadline-miss event.  Activating a task in EDF_WAIT raises
   XACTIVATION; activating a task that is neither SLEEP nor
   EDF_WCET_VIOLATED is silently ignored (already active). */
static void EDF_task_activate(LEVEL l, PID p)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
  struct timespec *temp;

  if (proc_table[p].status == EDF_WAIT) {
    kern_raise(XACTIVATION,p);
    return;
  }

  /* Test if we are trying to activate a non sleeping task    */
  /* Ignore this; the task is already active                  */
  if (proc_table[p].status != SLEEP &&
      proc_table[p].status != EDF_WCET_VIOLATED)
    return;


  /* see also EDF_timer_deadline */
  ll_gettime(TIME_EXACT, &proc_table[p].request_time);

  /* deadline = request_time + period (stored inside the queue entry) */
  temp = iq_query_timespec(p, &lev->ready);
  TIMESPEC_ASSIGN(temp, &proc_table[p].request_time);
  ADDUSEC2TIMESPEC(lev->period[p], temp);

  /* Insert task in the correct position */
  proc_table[p].status = EDF_READY;
  iq_timespec_insert(p,&lev->ready);

  /* Set the deadline timer */
  lev->deadline_timer[p] = kern_event_post(temp,
                                           EDF_timer_deadline,
                                           (void *)p);
  edf_printf("(dline p%d ev%d %d.%d)",p,(int)lev->deadline_timer[p],(int)temp->tv_sec,(int)temp->tv_nsec/1000);
}
 
/* Re-insertion hook (e.g. after a blocking primitive releases the task):
   like EDF_task_activate but without state checks, without refreshing
   request_time, and without re-posting the deadline timer. */
static void EDF_task_insert(LEVEL l, PID p)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  /* Similar to EDF_task_activate, but we don't check in what state
     the task is and we don't set the request_time */

  /* Insert task in the correct position */
  proc_table[p].status = EDF_READY;
  iq_timespec_insert(p,&lev->ready);
}
 
/* Extraction hook (task blocks on a primitive): intentionally empty. */
static void EDF_task_extract(LEVEL l, PID p)
{
  /* Extract the running task from the level
     . we have already extract it from the ready queue at the dispatch time.
     . the capacity event have to be removed by the generic kernel
     . the wcet don't need modification...
     . the state of the task is set by the calling function
     . the deadline must remain...

     So, we do nothing!!!
  */
}
 
/* End-of-job hook: the task finished its instance before exhausting the
   wcet.  Sporadic tasks go to EDF_WAIT (next activation is explicit),
   periodic tasks to EDF_IDLE (reactivated by the deadline timer). */
static void EDF_task_endcycle(LEVEL l, PID p)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  edf_printf("(ecyc p%d %d.%d)",p,(int)schedule_time.tv_sec,(int)schedule_time.tv_nsec/1000);

  /* the task has terminated his job before it consume the wcet. All OK! */
  if (lev->flag[p] & EDF_FLAG_SPORADIC)
    proc_table[p].status = EDF_WAIT;
  else /* periodic task: idle until the deadline timer reactivates it */
    proc_table[p].status = EDF_IDLE;

  /* we reset the capacity counters... */
  if (lev->flags & EDF_ENABLE_WCET_CHECK)
    proc_table[p].avail_time = proc_table[p].wcet;

  /* when the deadline timer fire, it recognize the situation and set
     correctly all the stuffs (like reactivation, request_time, etc... ) */
}
 
/* Termination hook: the task is only marked EDF_ZOMBIE here; the
   pending deadline timer performs the real cleanup (descriptor freeing,
   bandwidth release) when it fires. */
static void EDF_task_end(LEVEL l, PID p)
{
  // EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  proc_table[p].status = EDF_ZOMBIE;

  /* When the deadline timer fire, it put the task descriptor in
     the free queue, and free the allocated bandwidth... */
}
 
/* Sleep hook: park the task in EDF_WAIT and refill its capacity; the
   deadline timer will later move it to the real SLEEP state. */
static void EDF_task_sleep(LEVEL l, PID p)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  /* the task has terminated his job before it consume the wcet. All OK! */
  proc_table[p].status = EDF_WAIT;

  /* we reset the capacity counters... */
  if (lev->flags & EDF_ENABLE_WCET_CHECK)
    proc_table[p].avail_time = proc_table[p].wcet;

  /* when the deadline timer fire, it recognize the situation and set
     correctly the task state to sleep... */
}
 
 
/* Guest Functions
   These functions manage a JOB_TASK_MODEL, that is used to put
   a guest task (owned by another scheduling module) in the EDF ready
   queue. */

/* Guest creation: records the externally supplied absolute deadline and
   period.  No bandwidth accounting is done here — the owning level is
   responsible for the guarantee. */
static int EDF_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
  JOB_TASK_MODEL *job = (JOB_TASK_MODEL *)m;

  /* if the EDF_guest_create is called, then the pclass must be a
     valid pclass. */

  /* the guest's deadline is taken as-is from the model */
  *iq_query_timespec(p, &lev->ready) = job->deadline;
  lev->deadline_timer[p] = -1;

  /* noraiseexc suppresses the deadline-miss watchdog for this guest */
  if (job->noraiseexc)
    lev->flag[p] = EDF_FLAG_NORAISEEXC;
  else
    lev->flag[p] = 0;

  lev->period[p] = job->period;

  /* there is no bandwidth guarantee at this level, it is performed
     by the level that inserts guest tasks... */

  return 0; /* OK, also if the task cannot be guaranteed... */
}
 
/* Guest detach: nothing to undo — no allocation and no guarantee were
   made for guest tasks. */
static void EDF_guest_detach(LEVEL l, PID p)
{
  /* the EDF level doesn't introduce any dynamically allocated new field.
     No guarantee is performed on guest tasks... so we don't have to reset
     the NO_GUARANTEE FIELD */
}
 
/* Guest dispatch: same as EDF_task_dispatch — pull the task out of the
   ready queue (state already set by the generic scheduler). */
static void EDF_guest_dispatch(LEVEL l, PID p, int nostop)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  /* the task state is set to EXE by the scheduler()
     we extract the task from the ready queue
     NB: we can't assume that p is the first task in the queue!!! */
  iq_extract(p, &lev->ready);
}
 
/* Guest epilogue: the guest was preempted — requeue it in deadline
   order (no wcet accounting for guests). */
static void EDF_guest_epilogue(LEVEL l, PID p)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  /* the task has been preempted. it returns into the ready queue... */
  iq_timespec_insert(p,&lev->ready);
  proc_table[p].status = EDF_READY;
}
 
/* Guest activation: insert at the deadline position and, unless the
   owner asked for no exceptions, arm the guest deadline-miss watchdog
   on the deadline already stored by EDF_guest_create. */
static void EDF_guest_activate(LEVEL l, PID p)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  /* Insert task in the correct position */
  iq_timespec_insert(p,&lev->ready);
  proc_table[p].status = EDF_READY;

  /* Set the deadline timer */
  if (!(lev->flag[p] & EDF_FLAG_NORAISEEXC))
    lev->deadline_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
                                             EDF_timer_guest_deadline,
                                             (void *)p);

}
 
/* Guest re-insertion: requeue at the deadline position; no watchdog is
   (re-)posted here — it is still pending from EDF_guest_activate. */
static void EDF_guest_insert(LEVEL l, PID p)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  /* Insert task in the correct position */
  iq_timespec_insert(p,&lev->ready);
  proc_table[p].status = EDF_READY;
}
 
/* Guest extraction: intentionally empty. */
static void EDF_guest_extract(LEVEL l, PID p)
{
  /* Extract the running task from the level
     . we have already extract it from the ready queue at the dispatch time.
     . the state of the task is set by the calling function
     . the deadline must remain...

     So, we do nothing!!!
  */
}
 
/* endcycle makes no sense for a guest task: it is driven by its owning
   level, so reaching this hook is a kernel bug — raise XINVALID_GUEST. */
static void EDF_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* Guest removal: take the guest out of the ready queue (if still there)
   and cancel its pending deadline watchdog, since its slice in this
   level is over. */
static void EDF_guest_end(LEVEL l, PID p)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  //kern_printf("EDF_guest_end: dline timer %d\n",lev->deadline_timer[p]);
  if (proc_table[p].status == EDF_READY)
  {
    iq_extract(p, &lev->ready);
    //kern_printf("(g_end rdy extr)");
  }

  /* we remove the deadline timer, because the slice is finished */
  if (lev->deadline_timer[p] != NIL) {
    // kern_printf("EDF_guest_end: dline timer %d\n",lev->deadline_timer[p]);
    event_delete(lev->deadline_timer[p]);
    lev->deadline_timer[p] = NIL;
  }

}
 
/* sleep makes no sense for a guest task — raise XINVALID_GUEST. */
static void EDF_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
 
/* Registration functions */
 
/*+ Registration function:
    int flags  the init flags ... see edf.h
    Allocates a level descriptor, installs all the EDF task/guest hooks,
    and initializes the per-task tables and the ready queue. +*/
void EDF_register_level(int flags)
{
  LEVEL l;            /* the level that we register */
  EDF_level_des *lev; /* for readableness only */
  PID i;              /* a counter */

  printk("EDF_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor();

  printk("    alloco descrittore %d %d\n",l,(int)sizeof(EDF_level_des));

  /* alloc the space needed for the EDF_level_des
     NOTE(review): the kern_alloc result is not checked for NULL —
     presumably registration happens at boot where allocation cannot
     fail; confirm against the other modules' registration functions. */
  lev = (EDF_level_des *)kern_alloc(sizeof(EDF_level_des));

  printk("    lev=%d\n",(int)lev);

  /* update the level_table with the new entry */
  level_table[l] = (level_des *)lev;

  /* fill the standard descriptor */
  strncpy(lev->l.level_name, EDF_LEVELNAME, MAX_LEVELNAME);
  lev->l.level_code               = EDF_LEVEL_CODE;
  lev->l.level_version            = EDF_LEVEL_VERSION;

  lev->l.level_accept_task_model  = EDF_level_accept_task_model;
  lev->l.level_accept_guest_model = EDF_level_accept_guest_model;
  lev->l.level_status             = EDF_level_status;
  lev->l.level_scheduler          = EDF_level_scheduler;

  /* the guarantee hook is installed only on request */
  if (flags & EDF_ENABLE_GUARANTEE)
    lev->l.level_guarantee        = EDF_level_guarantee;
  else
    lev->l.level_guarantee        = NULL;

  lev->l.task_create              = EDF_task_create;
  lev->l.task_detach              = EDF_task_detach;
  lev->l.task_eligible            = EDF_task_eligible;
  lev->l.task_dispatch            = EDF_task_dispatch;
  lev->l.task_epilogue            = EDF_task_epilogue;
  lev->l.task_activate            = EDF_task_activate;
  lev->l.task_insert              = EDF_task_insert;
  lev->l.task_extract             = EDF_task_extract;
  lev->l.task_endcycle            = EDF_task_endcycle;
  lev->l.task_end                 = EDF_task_end;
  lev->l.task_sleep               = EDF_task_sleep;

  lev->l.guest_create             = EDF_guest_create;
  lev->l.guest_detach             = EDF_guest_detach;
  lev->l.guest_dispatch           = EDF_guest_dispatch;
  lev->l.guest_epilogue           = EDF_guest_epilogue;
  lev->l.guest_activate           = EDF_guest_activate;
  lev->l.guest_insert             = EDF_guest_insert;
  lev->l.guest_extract            = EDF_guest_extract;
  lev->l.guest_endcycle           = EDF_guest_endcycle;
  lev->l.guest_end                = EDF_guest_end;
  lev->l.guest_sleep              = EDF_guest_sleep;

  /* fill the EDF descriptor part */
  for(i=0; i<MAX_PROC; i++) {
    lev->period[i]         = 0;
    lev->deadline_timer[i] = -1;
    lev->flag[i]           = 0;
  }

  iq_init(&lev->ready, &freedesc, 0);
  lev->flags = flags & 0x07;   /* keep only the three public flag bits */
  lev->U     = 0;
}
 
/* Return the bandwidth currently allocated by the EDF level l, or 0 if
   the descriptor at l is not an EDF level of the expected version. */
bandwidth_t EDF_usedbandwidth(LEVEL l)
{
  EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

  if (lev->l.level_code    != EDF_LEVEL_CODE ||
      lev->l.level_version != EDF_LEVEL_VERSION)
    return 0;

  return lev->U;
}
 
/shark/tags/rel_0_2/kernel/modules/posix.c
0,0 → 1,601
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: posix.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the scheduling module compatible with POSIX
specifications
 
Read posix.h for further details.
 
RR tasks have the CONTROL_CAP bit set
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/posix.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/*+ Status used in the level +*/
#define POSIX_READY MODULE_STATUS_BASE   /* task is queued in a ready queue */

/*+ the level redefinition for the Round Robin level +*/
typedef struct {
  level_des l;          /*+ the standard level descriptor          +*/

  int nact[MAX_PROC];   /*+ number of pending activations; -1 means
                            "don't save arrivals" for that task    +*/
  int priority[MAX_PROC]; /*+ priority of each task                +*/

  IQUEUE *ready;        /*+ the ready queue array, one queue per
                            priority, allocated at registration    +*/

  int slice;            /*+ the level's time slice                 +*/

  struct multiboot_info *multiboot; /*+ used if the level have to insert
                                        the main task              +*/
  int maxpriority;      /*+ the priority are from 0 to maxpriority
                            (i.e 0 to 31)                          +*/

  int yielding;         /*+ equal to 1 when a sched_yield is called +*/

} POSIX_level_des;
 
/* Translate a task status code into a printable string.  Codes below
   MODULE_STATUS_BASE are generic kernel states and are delegated to
   the global status_to_a(). */
static char *POSIX_status_to_a(WORD status)
{
  if (status < MODULE_STATUS_BASE)
    return status_to_a(status);

  return (status == POSIX_READY) ? "POSIX_Ready" : "POSIX_Unknown";
}
 
/* Accept only NRT task models (generic, or explicitly bound to this
   level).  Returns 0 on acceptance, -1 on refusal. */
static int POSIX_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
  return (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l)) ? 0 : -1;
}
 
/* The POSIX level never hosts guest tasks. */
static int POSIX_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
  return -1;
}
 
/* Console dump of the level state: the time slice, then every task of
   this level that is neither ready nor free (the ready ones are visible
   through the scheduler anyway). */
static void POSIX_level_status(LEVEL l)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);
  PID p;

  kern_printf("Slice: %d \n", lev->slice);

  for (p=0; p<MAX_PROC; p++)
    if (proc_table[p].task_level == l && proc_table[p].status != POSIX_READY
        && proc_table[p].status != FREE )
      kern_printf("Pid: %d\t Name: %20s Prio: %3d Status: %s\n",
                  p,proc_table[p].name,
                  lev->priority[p],
                  POSIX_status_to_a(proc_table[p].status));

}
 
 
/* This is not efficient but very fair :-)
   The need of all this stuff is because if a task execute a long time
   due to (shadow!) priority inheritance, then the task shall go to the
   tail of the queue many times...

   Scan the priority queues from highest (maxpriority) to lowest; within
   a queue, a RR task (CONTROL_CAP) whose slice is exhausted gets its
   budget refilled and is rotated to the tail, and the scan retries the
   same queue.  Returns the chosen PID or NIL if all queues are empty. */
static PID POSIX_level_scheduler(LEVEL l)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);

  PID p;

  int prio;

  prio = lev->maxpriority;

  for (;;) {
    p = iq_query_first(&lev->ready[prio]);
    if (p == NIL) {
      /* this queue is empty: fall to the next lower priority */
      if (prio) {
        prio--;
        continue;
      }
      else
        return NIL;
    }

    /* RR task out of budget: refill and rotate to the tail, then retry */
    if ((proc_table[p].control & CONTROL_CAP) &&
        (proc_table[p].avail_time <= 0)) {
      proc_table[p].avail_time += proc_table[p].wcet;
      iq_extract(p,&lev->ready[prio]);
      iq_insertlast(p,&lev->ready[prio]);
    }
    else
      return p;
  }
}
 
/* The POSIX level always passes the guarantee without consuming any
   bandwidth.  The hook exists only so that an aperiodic server placed
   at a lower-priority level (e.g. a TBS server) can still be
   guaranteed through the chain. */
static int POSIX_level_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  return 1;   /* *freebandwidth is deliberately left untouched */
}
 
 
/* Level-specific task creation: either inherit the scheduling
   parameters from the creating task (NRT_INHERIT_SCHED, same level) or
   take priority/slice/policy/arrival handling from the NRT model.
   The wcet field doubles as the RR slice budget.  Always returns 0. */
static int POSIX_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);
  NRT_TASK_MODEL *nrt = (NRT_TASK_MODEL *)m;

  /* the task state is set at SLEEP by the general task_create */

  /* I used the wcet field because using wcet can account if a task
     consume more than the timeslice... */

  if (nrt->inherit == NRT_INHERIT_SCHED &&
      proc_table[exec_shadow].task_level == l) {
    /* We inherit the scheduling properties if the scheduling level
       *is* the same */
    lev->priority[p] = lev->priority[exec_shadow];
    proc_table[p].avail_time = proc_table[exec_shadow].avail_time;
    proc_table[p].wcet       = proc_table[exec_shadow].wcet;

    /* copy only the CONTROL_CAP bit (RR vs FIFO) from the creator */
    proc_table[p].control = (proc_table[p].control & ~CONTROL_CAP) |
                            (proc_table[exec_shadow].control & CONTROL_CAP);
    /* inherit the save-arrivals setting, but start with 0 pending */
    lev->nact[p] = (lev->nact[exec_shadow] == -1) ? -1 : 0;
  }
  else {
    lev->priority[p] = nrt->weight;
    /* per-task slice if given, otherwise the level default */
    if (nrt->slice) {
      proc_table[p].avail_time = nrt->slice;
      proc_table[p].wcet       = nrt->slice;
    }
    else {
      proc_table[p].avail_time = lev->slice;
      proc_table[p].wcet       = lev->slice;
    }
    if (nrt->policy == NRT_RR_POLICY)
      proc_table[p].control    |= CONTROL_CAP;
    if (nrt->arrivals == SAVE_ARRIVALS)
      lev->nact[p] = 0;
    else
      lev->nact[p] = -1;
  }

  return 0; /* OK */
}
 
/* Detach hook: nothing to release. */
static void POSIX_task_detach(LEVEL l, PID p)
{
  /* the POSIX level doesn't introduce any new field in the TASK_MODEL
     so, all detach stuffs are done by the task_create
     The task state is set at FREE by the general task_create */
}
 
/* Eligibility hook: a task chosen by the scheduler is always allowed
   to run at this level, so report "eligible" (0) unconditionally. */
static int POSIX_task_eligible(LEVEL l, PID p)
{
  return 0;
}
 
/* Dispatch hook: remove the task from its priority's ready queue (the
   generic scheduler() has already set its state to EXE). */
static void POSIX_task_dispatch(LEVEL l, PID p, int nostop)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);

  /* the task state is set EXE by the scheduler()
     we extract the task from the ready queue
     NB: we can't assume that p is the first task in the queue!!! */
  iq_extract(p, &lev->ready[lev->priority[p]]);
}
 
/* Epilogue hook: requeue the preempted task.
   - after sched_yield: always to the tail;
   - RR task with exhausted slice: refill the budget and go to the tail;
   - otherwise (preempted mid-slice): back to the head. */
static void POSIX_task_epilogue(LEVEL l, PID p)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);

  if (lev->yielding) {
    lev->yielding = 0;
    iq_insertlast(p,&lev->ready[lev->priority[p]]);
  }
  /* check if the slice is finished and insert the task in the correct
     queue position */
  else if (proc_table[p].control & CONTROL_CAP &&
           proc_table[p].avail_time <= 0) {
    proc_table[p].avail_time += proc_table[p].wcet;
    iq_insertlast(p,&lev->ready[lev->priority[p]]);
  }
  else
    iq_insertfirst(p,&lev->ready[lev->priority[p]]);

  proc_table[p].status = POSIX_READY;
}
 
/* Activation hook: a non-sleeping task either accumulates the arrival
   (nact >= 0) or drops it (nact == -1); a sleeping task is timestamped
   and appended to its priority queue. */
static void POSIX_task_activate(LEVEL l, PID p)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);

  /* Test if we are trying to activate a non sleeping task    */
  /* save activation (only if needed...) */
  if (proc_table[p].status != SLEEP) {
    if (lev->nact[p] != -1)
      lev->nact[p]++;
    return;
  }

  ll_gettime(TIME_EXACT, &proc_table[p].request_time);

  /* Insert task in the correct position */
  proc_table[p].status = POSIX_READY;
  iq_insertlast(p,&lev->ready[lev->priority[p]]);
}
 
/* Re-insertion hook (e.g. after a blocking primitive releases the task):
   like POSIX_task_activate but with no state check and no request_time
   update. */
static void POSIX_task_insert(LEVEL l, PID p)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);

  /* Similar to POSIX_task_activate, but we don't check in what state
     the task is and we don't set the request_time */

  /* Insert task in the correct position */
  proc_table[p].status = POSIX_READY;
  iq_insertlast(p,&lev->ready[lev->priority[p]]);
}
 
/* Extraction hook (task blocks on a primitive): intentionally empty. */
static void POSIX_task_extract(LEVEL l, PID p)
{
  /* Extract the running task from the level
     . we have already extract it from the ready queue at the dispatch time.
     . the capacity event have to be removed by the generic kernel
     . the wcet don't need modification...
     . the state of the task is set by the calling function

     So, we do nothing!!!
  */
}
 
/* End-of-job hook: if there are saved activations, consume one and
   restart the task immediately (head of its queue); otherwise the task
   goes back to SLEEP. */
static void POSIX_task_endcycle(LEVEL l, PID p)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);

  if (lev->nact[p] > 0) {
    /* continue!!!! */
    ll_gettime(TIME_EXACT, &proc_table[p].request_time);
    lev->nact[p]--;
    iq_insertfirst(p,&lev->ready[lev->priority[p]]);
    proc_table[p].status = POSIX_READY;
  }
  else
    proc_table[p].status = SLEEP;
}
 
/* Termination hook: drop any saved activations and return the task
   descriptor to the global free queue. */
static void POSIX_task_end(LEVEL l, PID p)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);

  lev->nact[p] = -1;

  /* then, we insert the task in the free queue */
  proc_table[p].status = FREE;
  iq_priority_insert(p,&freedesc);
}
 
/* Sleep hook: discard pending activations and put the task to sleep. */
static void POSIX_task_sleep(LEVEL l, PID p)
{
  POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);
  lev->nact[p] = 0;
  proc_table[p].status = SLEEP;
}
 
 
 
/* Guest task hooks: the POSIX level refuses all guest models
   (POSIX_level_accept_guest_model always returns -1), so reaching any
   of these entry points is a kernel bug — each one raises
   XINVALID_GUEST against the running task. */
static int POSIX_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }

static void POSIX_guest_detach(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void POSIX_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void POSIX_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void POSIX_guest_activate(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void POSIX_guest_insert(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void POSIX_guest_extract(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void POSIX_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void POSIX_guest_end(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void POSIX_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
/* Registration functions */
 
/*+ This init function installs the "main" task: builds an NRT model
    bound to this level (RR policy, weight 0, unkillable, FPU enabled),
    creates the task running __init__ with the multiboot info as
    argument, and activates it. +*/
static void POSIX_call_main(void *l)
{
  LEVEL lev;
  PID p;
  NRT_TASK_MODEL m;
  void *mb;

  lev = (LEVEL)l;

  nrt_task_default_model(m);
  nrt_task_def_level(m,lev); /* with this we are sure that the task arrives
                                at the correct level */

  mb = ((POSIX_level_des *)level_table[lev])->multiboot;
  nrt_task_def_arg(m,mb);
  nrt_task_def_usemath(m);
  nrt_task_def_nokill(m);
  nrt_task_def_ctrl_jet(m);
  nrt_task_def_weight(m,0);
  nrt_task_def_policy(m,NRT_RR_POLICY);
  nrt_task_def_inherit(m,NRT_EXPLICIT_SCHED);

  p = task_create("Main", __init__, (TASK_MODEL *)&m, NULL);

  if (p == NIL)
    printk("\nPanic!!! can't create main task...\n");

  POSIX_task_activate(lev,p);
}
 
 
/*+ Registration function:
    TIME slice                the slice for the Round Robin queue
    int createmain            1 if the level creates the main task 0 otherwise
    struct multiboot_info *mb used if createmain specified
    int prioritylevels        number of priority queues (0..prioritylevels-1)

    Allocates a level descriptor, installs all the POSIX hooks, creates
    one ready queue per priority, clamps the slice to
    [POSIX_MINIMUM_SLICE, POSIX_MAXIMUM_SLICE] and optionally schedules
    the creation of the main task at RUNLEVEL_INIT. +*/
void POSIX_register_level(TIME slice,
                          int createmain,
                          struct multiboot_info *mb,
                          int prioritylevels)
{
  LEVEL l;              /* the level that we register */
  POSIX_level_des *lev; /* for readableness only */
  PID i;                /* a counter */
  int x;                /* a counter */

  printk("POSIX_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor();

  printk("    alloco descrittore %d %d\n",l,(int)sizeof(POSIX_level_des));

  /* alloc the space needed for the POSIX_level_des
     NOTE(review): kern_alloc results (here and for lev->ready below)
     are not checked for NULL — presumably boot-time allocation cannot
     fail; confirm against the other registration functions. */
  lev = (POSIX_level_des *)kern_alloc(sizeof(POSIX_level_des));

  printk("    lev=%d\n",(int)lev);

  /* update the level_table with the new entry */
  level_table[l] = (level_des *)lev;

  /* fill the standard descriptor */
  strncpy(lev->l.level_name, POSIX_LEVELNAME, MAX_LEVELNAME);
  lev->l.level_code               = POSIX_LEVEL_CODE;
  lev->l.level_version            = POSIX_LEVEL_VERSION;

  lev->l.level_accept_task_model  = POSIX_level_accept_task_model;
  lev->l.level_accept_guest_model = POSIX_level_accept_guest_model;
  lev->l.level_status             = POSIX_level_status;
  lev->l.level_scheduler          = POSIX_level_scheduler;
  lev->l.level_guarantee          = POSIX_level_guarantee;

  lev->l.task_create              = POSIX_task_create;
  lev->l.task_detach              = POSIX_task_detach;
  lev->l.task_eligible            = POSIX_task_eligible;
  lev->l.task_dispatch            = POSIX_task_dispatch;
  lev->l.task_epilogue            = POSIX_task_epilogue;
  lev->l.task_activate            = POSIX_task_activate;
  lev->l.task_insert              = POSIX_task_insert;
  lev->l.task_extract             = POSIX_task_extract;
  lev->l.task_endcycle            = POSIX_task_endcycle;
  lev->l.task_end                 = POSIX_task_end;
  lev->l.task_sleep               = POSIX_task_sleep;

  lev->l.guest_create             = POSIX_guest_create;
  lev->l.guest_detach             = POSIX_guest_detach;
  lev->l.guest_dispatch           = POSIX_guest_dispatch;
  lev->l.guest_epilogue           = POSIX_guest_epilogue;
  lev->l.guest_activate           = POSIX_guest_activate;
  lev->l.guest_insert             = POSIX_guest_insert;
  lev->l.guest_extract            = POSIX_guest_extract;
  lev->l.guest_endcycle           = POSIX_guest_endcycle;
  lev->l.guest_end                = POSIX_guest_end;
  lev->l.guest_sleep              = POSIX_guest_sleep;

  /* fill the POSIX descriptor part */
  for (i = 0; i < MAX_PROC; i++)
    lev->nact[i] = -1;   /* default: don't save arrivals */

  lev->maxpriority = prioritylevels -1;

  /* one ready queue per priority */
  lev->ready = (IQUEUE *)kern_alloc(sizeof(IQUEUE) * prioritylevels);

  for (x = 0; x < prioritylevels; x++)
    iq_init(&lev->ready[x], &freedesc, 0);

  /* clamp the default slice into the legal range */
  if (slice < POSIX_MINIMUM_SLICE) slice = POSIX_MINIMUM_SLICE;
  if (slice > POSIX_MAXIMUM_SLICE) slice = POSIX_MAXIMUM_SLICE;
  lev->slice = slice;

  lev->multiboot = mb;

  if (createmain)
    sys_atrunlevel(POSIX_call_main,(void *) l, RUNLEVEL_INIT);
}
 
/*+ this function forces the running task to go to his queue tail;
    (it works only on the POSIX level)
    Returns 0 on success, -1 if l is not a valid POSIX level or the
    running task does not belong to it.

    FIX: the original initialized lev from level_table[l] BEFORE
    validating l, performing an out-of-bounds read of level_table for
    an invalid level index; the dereference now happens only after all
    checks have passed. +*/
int POSIX_sched_yield(LEVEL l)
{
  POSIX_level_des *lev;

  if (l < 0 || l >= sched_levels)
    return -1;

  if (level_table[l]->level_code != POSIX_LEVEL_CODE ||
      level_table[l]->level_version != POSIX_LEVEL_VERSION )
    return -1;

  if (proc_table[exec_shadow].task_level != l)
    return -1;

  lev = (POSIX_level_des *)(level_table[l]);

  /* yielding makes the epilogue put the task at the TAIL of its queue */
  proc_table[exec_shadow].context = kern_context_save();
  lev->yielding = 1;
  scheduler();
  kern_context_load(proc_table[exec_shadow].context);
  return 0;
}
 
/*+ this function returns the maximum priority allowed for the POSIX
    level (i.e. prioritylevels-1 as passed at registration) +*/
int POSIX_get_priority_max(LEVEL l)
{
  return ((POSIX_level_des *)(level_table[l]))->maxpriority;
}
 
/*+ this function returns the default timeslice for the POSIX level +*/
int POSIX_rr_get_interval(LEVEL l)
{
  return ((POSIX_level_des *)(level_table[l]))->slice;
}
 
/*+ this functions returns some parameters of a task;
    policy must be NRT_RR_POLICY or NRT_FIFO_POLICY;
    priority must be in the range [0..prioritylevels]
    returns ENOSYS or ESRCH if there are problems +*/
int POSIX_getschedparam(LEVEL l, PID p, int *policy, int *priority)
{
  /* validate the level before any level_table[l] dereference */
  if (l < 0 || l >= sched_levels)
    return ENOSYS;

  if (level_table[l]->level_code != POSIX_LEVEL_CODE ||
      level_table[l]->level_version != POSIX_LEVEL_VERSION )
    return ENOSYS;

  if (p<0 || p>= MAX_PROC || proc_table[p].status == FREE)
    return ESRCH;

  if (proc_table[p].task_level != l)
    return ENOSYS;

  /* the CONTROL_CAP bit encodes the policy: set = RR, clear = FIFO */
  if (proc_table[p].control & CONTROL_CAP)
    *policy = NRT_RR_POLICY;
  else
    *policy = NRT_FIFO_POLICY;

  *priority = ((POSIX_level_des *)(level_table[l]))->priority[p];

  return 0;
}
 
/*+ this functions sets the scheduling parameters of a task:
    policy is SCHED_RR or SCHED_FIFO (mapped onto the CONTROL_CAP bit),
    priority selects the ready queue; a POSIX_READY task is moved to the
    tail of its new queue.
    Returns 0 on success, ENOSYS/ESRCH/EINVAL on error.

    FIX: the original initialized lev from level_table[l] BEFORE
    validating l, performing an out-of-bounds read of level_table for
    an invalid level index; the dereference now happens only after the
    level checks have passed. +*/
int POSIX_setschedparam(LEVEL l, PID p, int policy, int priority)
{
  POSIX_level_des *lev;

  if (l < 0 || l >= sched_levels)
    return ENOSYS;

  if (level_table[l]->level_code != POSIX_LEVEL_CODE ||
      level_table[l]->level_version != POSIX_LEVEL_VERSION )
    return ENOSYS;

  if (p<0 || p>= MAX_PROC || proc_table[p].status == FREE)
    return ESRCH;

  if (proc_table[p].task_level != l)
    return ENOSYS;

  lev = (POSIX_level_des *)(level_table[l]);

  if (policy == SCHED_RR)
    proc_table[p].control |= CONTROL_CAP;
  else if (policy == SCHED_FIFO)
    proc_table[p].control &= ~CONTROL_CAP;
  else
    return EINVAL;

  if (lev->priority[p] != priority) {
    if (proc_table[p].status == POSIX_READY) {
      /* requeue the task under its new priority */
      iq_extract(p,&lev->ready[lev->priority[p]]);
      lev->priority[p] = priority;
      iq_insertlast(p,&lev->ready[priority]);
    }
    else
      lev->priority[p] = priority;
  }

  return 0;
}
 
 
 
/shark/tags/rel_0_2/kernel/modules/hartport.c
0,0 → 1,675
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: hartport.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the Hartik 3.3.1 Port functions
 
Author: Giuseppe Lipari
Date: 2/7/96
 
File: Port.C (renamed to hartport.c)
Revision: 1.4
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/hartport.h>
#include <kernel/config.h>
#include <ll/ll.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <kernel/model.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <errno.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
 
/* mutual exclusion on the whole hash table */
static sem_t hash_mutex;

#define __PORT_DBG__   /* enable extra parameter checking in the port API */

/* one hash-table entry per port NAME (lookup by name at connect time) */
struct hash_port {
  char name[MAX_PORT_NAME];   /* port name, key of the entry */
  PID port_index;             /* descriptor index of the created port */
  BYTE blocked;               /* tasks blocked waiting for the port to appear */
  sem_t sem;                  /* they block on this semaphore */
  BYTE valid;                 /* TRUE if the entry is in use */
  int port_int;               /* head of the port-interface list for this entry */
};

/* kernel-side port descriptor: the shared message buffer and its
   synchronization (mutex + full/empty counting semaphores) */
struct port_ker {
  BYTE valid;        /* TRUE if the descriptor is in use */
  int next;          /* next free descriptor (free-list link) */
  WORD dim_block;    /* size of the message buffer, in bytes */
  BYTE *mem_block;   /* the buffer itself */
  BYTE *head;        /* extraction point */
  BYTE *tail;        /* insertion point */
  BYTE type;         /* STREAM / MAILBOX / STICK */
  sem_t mutex;       /* mutual exclusion on the buffer */
  sem_t pieno;       /* counts full slots  ("pieno" = full)  */
  sem_t vuoto;       /* counts empty slots ("vuoto" = empty) */
};

/* per-endpoint port interface: one per task side of a connection */
struct port_com {
  BYTE valid;        /* TRUE if the interface is in use */
  int next;          /* next free interface (free-list link) */
  PID proc_id;       /* task owning this endpoint */
  BYTE access;       /* READ or WRITE */
  int port_index;    /* the port_ker descriptor it is connected to */
  WORD dim_mes;      /* message size for this endpoint */
  int h_index;       /* back-pointer into htable */
};
struct hash_port htable[MAX_HASH_ENTRY];
struct port_ker port_des[MAX_PORT];
struct port_com port_int[MAX_PORT_INT];
int freeportdes;   /* head of the free port-descriptor list */
int freeportint;   /* head of the free port-interface list */

static int port_installed = 0;   /* guards against double HARTPORT_init */
 
/*----------------------------------------------------------------------*/
/* port_init() : initializes the port data structures; to be called     */
/*               from __hartik_register_levels__().  Idempotent: a      */
/*               second call returns immediately.                       */
/*----------------------------------------------------------------------*/
void HARTPORT_init(void)
{
  int i;

  if (port_installed)
    return;
  port_installed = 1;

  /* Init hash table */
  for (i = 0; i < MAX_HASH_ENTRY; i++) {
    htable[i].valid = FALSE;
    htable[i].port_int = NIL;
    htable[i].blocked = 0;
  }

  /* mutex sem on the hash table */
  sem_init(&hash_mutex, 0, 1);

  /* init the port descriptor table: chain all entries in a free list */
  for (i = 0; i < MAX_PORT - 1; i++) {
    port_des[i].next = i+1;
    port_des[i].valid = FALSE;
  }
  port_des[MAX_PORT - 1].next = NIL;
  port_des[MAX_PORT - 1].valid = FALSE;
  freeportdes = 0;

  /* Init the port interface table: same free-list scheme */
  for (i = 0; i < MAX_PORT_INT - 1; i++) {
    port_int[i].next = i+1;
    port_int[i].valid = FALSE;
  }
  port_int[MAX_PORT_INT - 1].next = NIL;
  port_int[MAX_PORT_INT - 1].valid = FALSE;
  //  for (i = PORT_NO_MORE_DESCR; i <= PORT_UNVALID_DESCR; i++)
  //    exc_set(i,port_exception);
  freeportint = 0;
}
 
 
/*----------------------------------------------------------------------*/
/* hash_fun() : maps a port name to a bucket of htable.                 */
/*                                                                      */
/* FIX: the first character is read through unsigned char before the    */
/* modulo.  Plain char may be signed (implementation-defined), so a     */
/* name starting with a byte >= 0x80 would make the original            */
/* expression negative and yield a negative, out-of-range bucket index. */
/*----------------------------------------------------------------------*/
static int hash_fun(char *name)
{
  return (*(unsigned char *)name % MAX_HASH_ENTRY);
}
 
 
/*----------------------------------------------------------------------*/
/* getfreedes : restituisce l'indice di un descrittore di porta libero */
/*----------------------------------------------------------------------*/
/* Pop a free port descriptor index from the free list.
   Returns the index, or -1 (errno = EPORT_NO_MORE_DESCR) when exhausted.
   Interrupts are disabled around the list manipulation. */
static int getfreedes(void)
{
    int idx;

    kern_cli();
    idx = freeportdes;
    if (idx == NIL) {
        errno = EPORT_NO_MORE_DESCR;
        kern_sti();
        return -1;
    }
    freeportdes = port_des[idx].next;
    kern_sti();
    return idx;
}
 
/*----------------------------------------------------------------------*/
/* ungetdes() : mette il descrittore tra quelli disponibile */
/*----------------------------------------------------------------------*/
/* Push the port descriptor pd back onto the free list. */
static void ungetdes(int pd)
{
    kern_cli();
    port_des[pd].next = freeportdes;
    freeportdes       = pd;
    kern_sti();
}
 
/*----------------------------------------------------------------------*/
/* get freeint : restituisce una interfaccia di porta libera */
/*----------------------------------------------------------------------*/
/* Pop a free port interface index from the free list.
   Returns the index, or -1 (errno = EPORT_NO_MORE_INTERF) when exhausted. */
static int getfreeint(void)
{
    int idx;

    kern_cli();
    idx = freeportint;
    if (idx == NIL) {
        errno = EPORT_NO_MORE_INTERF;
        kern_sti();
        return -1;
    }
    freeportint = port_int[idx].next;
    kern_sti();
    return idx;
}
 
/*----------------------------------------------------------------------*/
/* ungetint : rende disponibile l'interfaccia di porta specificata */
/*----------------------------------------------------------------------*/
/* Push the port interface pi back onto the free list. */
static void ungetint(int pi)
{
    kern_cli();
    port_int[pi].next = freeportint;
    freeportint       = pi;
    kern_sti();
}
 
/*----------------------------------------------------------------------*/
/* port_create(): Apre la porta specificata dalla stringa, effettuando */
/* automaticamente il collegamento */
/* WARNING : La funzione e' bloccante per la mutua esclusione sulla */
/* hash table */
/*----------------------------------------------------------------------*/
/* Create (open) the port identified by `name` and return a port interface
   index, or -1 with errno set on failure.
   Blocking: takes hash_mutex for mutual exclusion on the hash table, and
   wakes up any port_connect() callers already blocked on this name.
   FIX: every error path now releases the resources acquired so far — the
   original leaked the port descriptor on all failures, and additionally
   leaked the message buffer and the three semaphores when getfreeint()
   failed. */
PORT port_create(char *name, int dim_mes, int num_mes, BYTE type, BYTE access)
{
    int i, pd, pi;
    WORD letti = 0;
    BYTE flag = FALSE;

#ifdef __PORT_DBG__
    /* MAILBOX ports are created by the reader, STICK ports by the writer */
    if ((type == MAILBOX) && (access == WRITE)) {
        errno = EPORT_INCOMPAT_MESSAGE;
        return -1;
    }
    if ((type == STICK ) && (access == READ )) {
        errno = EPORT_INCOMPAT_MESSAGE;
        return -1;
    }
#endif
    /* grab a kernel port descriptor first */
    pd = getfreedes();
    if (pd == -1) return -1;

    /* Mutual exclusion on the hash table: it is shared with port_connect,
       so a connect can never interleave with an open. */
    sem_wait(&hash_mutex);

    /* Linear-probe from the hashed slot until we find a free entry, or an
       entry pre-created by a blocked connect (blocked > 0).  An entry that
       is open already (blocked == 0) is an error, as is probing the whole
       table (tracked by `letti`). */
    i = hash_fun(name);
    while (!flag) {
        if (htable[i].valid == FALSE) flag = TRUE;
        else {
            if (strcmp(htable[i].name, name) == 0) {
                if (htable[i].blocked == 0) {
                    errno = EPORT_ALREADY_OPEN;
                    sem_post(&hash_mutex);
                    ungetdes(pd);              /* FIX: don't leak the descriptor */
                    return -1;
                }
                else flag = TRUE;
            }
            else {
                i = (i+1) % MAX_HASH_ENTRY;
                letti++;
            }
        }
        if (letti > MAX_HASH_ENTRY-1) {
            errno = EPORT_NO_MORE_HASHENTRY;
            sem_post(&hash_mutex);
            ungetdes(pd);                      /* FIX: don't leak the descriptor */
            return -1;
        }
    }
    htable[i].valid = TRUE;
    strcpy(htable[i].name, name);

    htable[i].port_index = pd;

    /* Size the message buffer: STICK ports hold a single message. */
    if (type == STICK) port_des[pd].dim_block = dim_mes;
    else port_des[pd].dim_block = dim_mes * num_mes;

    kern_cli();
    port_des[pd].mem_block = kern_alloc(port_des[pd].dim_block);
    kern_sti();
    if (port_des[pd].mem_block == NULL) {
        errno = EPORT_2_CONNECT;
        /* FIX: undo on failure.  Only invalidate the hash entry if we
           created it here; if connects are already blocked on it the entry
           pre-existed (NOTE(review): those connects stay blocked — confirm
           the desired policy for this failure). */
        if (htable[i].blocked == 0)
            htable[i].valid = FALSE;
        sem_post(&hash_mutex);
        ungetdes(pd);
        return -1;
    }

    /* head = write pointer, tail = read pointer (circular buffer) */
    port_des[pd].head = port_des[pd].tail = port_des[pd].mem_block;

    sem_init(&port_des[pd].mutex, 0, 1);                       /* buffer lock   */
    sem_init(&port_des[pd].pieno, 0, port_des[pd].dim_block);  /* free bytes    */
    sem_init(&port_des[pd].vuoto, 0, 0);                       /* filled bytes  */
    port_des[pd].type = type;

    /* allocate and fill the interface returned to the caller */
    pi = getfreeint();
    if (pi == -1) {
        /* FIX: release everything acquired above */
        sem_destroy(&port_des[pd].mutex);
        sem_destroy(&port_des[pd].pieno);
        sem_destroy(&port_des[pd].vuoto);
        kern_cli();
        kern_free(port_des[pd].mem_block, port_des[pd].dim_block);
        kern_sti();
        if (htable[i].blocked == 0)
            htable[i].valid = FALSE;
        sem_post(&hash_mutex);
        ungetdes(pd);
        return -1;
    }
    /* port_int[pi].proc_id = exec_shadow; */
    port_int[pi].access     = access;
    port_int[pi].port_index = pd;
    port_int[pi].dim_mes    = dim_mes;
    port_int[pi].next       = NIL;
    port_int[pi].h_index    = i;
    port_des[pd].valid      = TRUE;
    port_int[pi].valid      = TRUE;

    /* Wake up any connect() callers blocked on this hash entry. */
    if (htable[i].blocked > 0) {
        sem_xpost(&htable[i].sem, htable[i].blocked);
        htable[i].blocked = 0;
        sem_destroy(&htable[i].sem);
    }

    sem_post(&hash_mutex);
    return (pi);
}
 
/*----------------------------------------------------------------------*/
/* port_connect(): collega la porta specificata dalla stringa. */
/* WARNING : La funzione e' bloccante per la mutua esclusione sulle */
/* strutture delle porte */
/*----------------------------------------------------------------------*/
/* Connect to the port identified by `name`, returning a port interface
   index or -1 with errno set.
   Blocking: if the port has not been opened yet, the caller blocks on a
   per-entry semaphore created on the fly and is released by port_create(). */
PORT port_connect(char *name, int dim_mes, BYTE type, BYTE access)
{
    int i, pi, pd, pn, letti = 0;
    BYTE flag = FALSE, create = FALSE;

#ifdef __PORT_DBG__
    /* MAILBOX ports are read by the creator, STICK ports written by it */
    if ((type == MAILBOX) && (access == READ )) {
        errno = EPORT_INCOMPAT_MESSAGE;
        return -1;
    }
    if ((type == STICK ) && (access == WRITE)) {
        errno = EPORT_INCOMPAT_MESSAGE;
        return -1;
    }
#endif
    /* First grab a port interface and fill it partially. */
    pi = getfreeint();
    if (pi == -1) return -1;
    /* port_int[pi].proc_id = exec_shadow; */
    port_int[pi].access  = access;
    port_int[pi].dim_mes = dim_mes;
    port_int[pi].next    = NIL;

    /* Mutual exclusion on the hash table. */
    sem_wait(&hash_mutex);

    /* Look up the entry.  If the port is already open we leave the loop
       with flag = TRUE, create = FALSE and htable[i].blocked == 0.
       Otherwise the port is not open yet and we will have to block. */
    i = hash_fun(name);
    while (!flag) {
        /* free slot: we are the first, pre-create the entry */
        if (htable[i].valid == FALSE) {
            flag = TRUE;
            create = TRUE;
        }
        /* found the name (may or may not be open yet) */
        else if (strcmp(htable[i].name, name) == 0) flag = TRUE;
        /* probe the next slot */
        else {
            i = (i+1) % MAX_HASH_ENTRY;
            letti ++;
        }
#ifdef __PORT_DBG__
        /* scanned the whole table without a match or a free slot */
        if (letti > MAX_HASH_ENTRY) {
            errno = EPORT_NO_MORE_HASHENTRY;
            sem_post(&hash_mutex);
            return -1;
        }
#endif
    }
    /* Port not open yet: create the rendezvous semaphore on the spot and
       block on it until port_create() posts it. */
    if (create == TRUE) {
        htable[i].valid = TRUE;
        htable[i].blocked = 1;
        strcpy(htable[i].name, name);
        sem_init(&htable[i].sem, 0, 0);
        sem_post(&hash_mutex);
        sem_xwait(&htable[i].sem, 1, BLOCK);
    }
    /* Another connect already blocked here: join it.  Either way we must
       release the hash mutex — we are done with the table for now. */
    else {
        if (htable[i].blocked > 0) {
            htable[i].blocked++;
            sem_post(&hash_mutex);
            sem_xwait(&htable[i].sem, 1, BLOCK);
        }
        else sem_post(&hash_mutex);
    }

    /* Consistency checks against the (now open) port. */
    pd = htable[i].port_index;
#ifdef __PORT_DBG__
    if (type != port_des[pd].type) {
        errno = EPORT_UNSUPPORTED_ACC;
        return -1;
    }
    if ((type == STICK) && (dim_mes != port_des[pd].dim_block)) {
        errno = EPORT_WRONG_OP;
        return -1;
    }
    if ((type != STICK) && (port_des[pd].dim_block % dim_mes) != 0) {
        errno = EPORT_WRONG_OP;
        return -1;
    }
#endif

    /* Link our interface at the head of the entry's interface list. */
    sem_wait(&hash_mutex);
    pn = htable[i].port_int;
    if (pn != NIL) {
#ifdef __PORT_DBG__
        /* STREAM ports allow a single connection */
        if (type == STREAM) {
            errno = EPORT_WRONG_TYPE;
            sem_post(&hash_mutex);
            return -1;
        }
        if (dim_mes != port_int[pn].dim_mes) {
            errno = EPORT_WRONG_OP;
            sem_post(&hash_mutex);
            return -1;
        }
#endif
        port_int[pi].next = htable[i].port_int;
        htable[i].port_int = pi;
    }
    else htable[i].port_int = pi;
    sem_post(&hash_mutex);
    port_int[pi].h_index = i;
    port_int[pi].port_index = pd;
    port_int[pi].valid = TRUE;
    return(pi);
}
 
/*----------------------------------------------------------------------*/
/* port_delete() : inversa di port_open, libera tutto */
/*----------------------------------------------------------------------*/
/* Inverse of port_create(): tear down the port reached through interface
   pi, freeing buffer, semaphores, descriptor, interface and hash entry.
   NOTE(review): `htable[i].port_int = pp->next` assumes pi is the HEAD of
   the entry's interface list, and the hash entry is invalidated even if
   other connected interfaces still reference it — confirm callers only
   delete after all connects have disconnected. */
void port_delete(PORT pi)
{
    int i;
    struct port_ker *pd;
    struct port_com *pp;

    pp = &port_int[pi];
    sem_wait(&hash_mutex);
    i = pp->h_index;
    pd = &port_des[htable[i].port_index];
    pd->valid = FALSE;
    sem_destroy(&pd->mutex);
    sem_destroy(&pd->pieno);
    sem_destroy(&pd->vuoto);
    kern_cli();
    kern_free(pd->mem_block, pd->dim_block);
    kern_sti();

    ungetdes(htable[i].port_index);
    pp->valid = FALSE;
    htable[i].port_int = pp->next;
    ungetint(pi);
    htable[i].valid = FALSE;
    sem_post(&hash_mutex);
}
 
/*----------------------------------------------------------------------*/
/* port_disconnect() : libera l'interfaccia di porta */
/*----------------------------------------------------------------------*/
/* Release the port interface pi obtained from port_connect().
   NOTE(review): the list update assumes pi is the head of the hash
   entry's interface list; disconnecting a non-head interface would
   detach the wrong elements — confirm usage pattern. */
void port_disconnect(PORT pi)
{
    sem_wait(&hash_mutex);
    if (htable[port_int[pi].h_index].valid == TRUE)
        htable[port_int[pi].h_index].port_int = port_int[pi].next;
    port_int[pi].valid = FALSE;
    ungetint(pi);
    sem_post(&hash_mutex);
}
 
/*----------------------------------------------------------------------*/
/* port_send() : Invia un messaggio alla porta */
/*----------------------------------------------------------------------*/
/* Send one message (pp->dim_mes bytes from msg) through port interface pi.
   `wait` is passed to sem_xwait and selects blocking/non-blocking behavior.
   Returns TRUE on success, FALSE if the non-blocking wait failed.
   Semaphore scheme (set up in port_create): `pieno` counts free bytes,
   `vuoto` counts filled bytes, `mutex` serializes buffer access.
   - STICK:   mutex only (single overwritable message)
   - STREAM:  space accounting only (single writer, no mutex needed)
   - default (MAILBOX): both. */
WORD port_send(PORT pi, void *msg, BYTE wait)
{
    struct port_ker *pd;
    struct port_com *pp;

    pp = &(port_int[pi]);
    pd = &(port_des[pp->port_index]);
#ifdef __PORT_DBG__
    if (pp->access == READ) {
        errno = EPORT_WRONG_OP;
        return -1;
    }
    if (!pd->valid) {
        errno = EPORT_INVALID_DESCR;
        return -1;
    }

#endif

    if (pd->type == STICK) sem_wait(&pd->mutex);
    else if (pd->type == STREAM) {
        if (sem_xwait(&pd->pieno, pp->dim_mes, wait)) return(FALSE);
    }
    else {
        if (sem_xwait(&pd->pieno, pp->dim_mes, wait)) return(FALSE);
        sem_wait(&pd->mutex);
    }

    /* copy into the circular buffer at the write pointer and advance it */
    memcpy(pd->head, msg, pp->dim_mes);

    pd->head += pp->dim_mes;
    if (pd->head >= (pd->mem_block + pd->dim_block))
        pd->head -= pd->dim_block;

    /* publish the new bytes / release the buffer */
    if (pd->type == STICK) sem_post(&pd->mutex);
    else if (pd->type == STREAM) sem_xpost(&pd->vuoto, pp->dim_mes);
    else {
        sem_xpost(&pd->vuoto, pp->dim_mes);
        sem_post(&pd->mutex);
    }
    return(TRUE);
}
 
/*----------------------------------------------------------------------*/
/* port_receive() : Riceve un messaggio dalla porta */
/*----------------------------------------------------------------------*/
/* Receive one message (pp->dim_mes bytes into msg) from port interface pi.
   Mirror image of port_send(): waits on `vuoto` (filled bytes), reads at
   the tail pointer, then returns the space to `pieno` (free bytes).
   Returns TRUE on success, FALSE if the non-blocking wait failed. */
WORD port_receive(PORT pi, void *msg, BYTE wait)
{
    struct port_ker *pd;
    struct port_com *pp;

    pp = &(port_int[pi]);
    pd = &(port_des[pp->port_index]);
#ifdef __PORT_DBG__
    if (pp->access == WRITE) {
        errno = EPORT_WRONG_OP;
        return -1;
    }
    if (!pd->valid) {
        errno = EPORT_INVALID_DESCR;
        return -1;
    }
#endif

    if (pd->type == STICK) sem_wait(&pd->mutex);
    else if (pd->type == STREAM) {
        if (sem_xwait(&pd->vuoto, pp->dim_mes, wait)) return(FALSE);
    }
    else {
        if (sem_xwait(&pd->vuoto, pp->dim_mes, wait)) return(FALSE);
        sem_wait(&pd->mutex);
    }

    /* copy out of the circular buffer at the read pointer and advance it */
    memcpy(msg, pd->tail, pp->dim_mes);

    pd->tail += pp->dim_mes;
    if (pd->tail >= (pd->mem_block + pd->dim_block))
        pd->tail -= pd->dim_block;

    /* return the space / release the buffer */
    if (pd->type == STICK) sem_post(&pd->mutex);
    else if (pd->type == STREAM) sem_xpost(&pd->pieno, pp->dim_mes);
    else {
        sem_xpost(&pd->pieno, pp->dim_mes);
        sem_post(&pd->mutex);
    }
    return(TRUE);
}
 
#ifdef __PORT_DBG__
 
/* Debug helper (__PORT_DBG__ only): dump the state of every valid port
   interface.  NOTE(review): pd->vuoto / pd->pieno are sem_t objects passed
   to %d — this relies on sem_t being integer-like; confirm. */
void print_port(void)
{
    int i;
    struct port_ker *pd;
    struct port_com *pp;
/*
    kern_printf("Hash Table :\n");
    for (i=0; i<MAX_HASH_ENTRY; i++)
        kern_printf("%d\tvl: %d\tbl: %d\tpd: %d\t%s\n", i,
                    htable[i].valid, htable[i].blocked, htable[i].port_index,
                    htable[i].name);
*/
    kern_printf("Port des :\n");
    kern_printf("Free port des : %d\n", freeportdes);
    for (i=0; i<MAX_PORT_INT; i++)
        if (port_int[i].valid) {
            pp = &port_int[i];
            pd = &port_des[pp->port_index];
            kern_printf("%d %s vt: %d pn: %d\n", i, htable[pp->h_index].name,
                        pd->vuoto, pd->pieno);
        }
/*
    kern_printf("Port int :\n");
    kern_printf("Free port int : %d\n", freeportint);
    for (i=0; i<MAX_PORT_INT; i++)
        kern_printf("%d vl : %d dm : %d port_index : %d proc_id : %d\n", i,
                    port_int[i].valid, port_int[i].dim_mes,
                    port_int[i].port_index, port_int[i].proc_id);
*/
}
 
/* Debug helper (__PORT_DBG__ only): print one port interface's state.
   FIX: the original passed the uninitialized local buffer `msg` as the
   kern_printf FORMAT string (apparently a leftover sprintf(msg, ...)
   conversion) — undefined behavior.  Pass the format string directly and
   drop the dead buffer.
   NOTE(review): pd->vuoto / pd->pieno are sem_t printed with %d, matching
   print_port() above — confirm sem_t is integer-like. */
void port_write(PORT p)
{
    struct port_ker *pd;
    struct port_com *pp;

    pp = &port_int[p];
    pd = &port_des[pp->port_index];

    kern_printf("%d pd: %d vt: %d pn: %d ", p, pp->port_index,
                pd->vuoto, pd->pieno);
}
 
#endif
/shark/tags/rel_0_2/kernel/modules/rr2.c
0,0 → 1,446
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: rr2.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the scheduling module RR2 (Round Robin) version 2
 
Read rr2.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARR2ANTY; without even the implied waRR2anty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/rr2.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/*+ Status used in the level +*/
#define RR2_READY MODULE_STATUS_BASE
 
/*+ the level redefinition for the Round Robin level +*/
/*+ the level redefinition for the Round Robin level +*/
typedef struct {
    level_des l;          /*+ the standard level descriptor +*/

    int nact[MAX_PROC];   /*+ number of pending activations; -1 means
                              "do not save arrivals" for that task +*/

    IQUEUE ready;         /*+ the ready queue +*/

    int slice;            /*+ the level's default time slice (usec) +*/

    struct multiboot_info *multiboot; /*+ used if the level have to insert
                                          the main task +*/
} RR2_level_des;
 
 
/* Translate a task status code into a printable string.
   Codes below MODULE_STATUS_BASE belong to the generic kernel and are
   delegated to status_to_a(). */
static char *RR2_status_to_a(WORD status)
{
    if (status < MODULE_STATUS_BASE)
        return status_to_a(status);

    return (status == RR2_READY) ? "RR2_Ready" : "RR2_Unknown";
}
 
/* Accept only NRT task models, either generic or addressed to this level.
   Returns 0 on acceptance, -1 otherwise. */
static int RR2_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
    if (m->pclass != NRT_PCLASS && m->pclass != (NRT_PCLASS | l))
        return -1;
    return 0;
}
 
/* The RR2 level hosts no guest tasks: reject every guest model. */
static int RR2_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
    return -1;
}
 
/* Debug dump: print the level's slice, every task in the ready queue, and
   every other non-free task owned by this level. */
static void RR2_level_status(LEVEL l)
{
    RR2_level_des *lev = (RR2_level_des *)(level_table[l]);
    PID pid;

    kern_printf("Slice: %d \n", lev->slice);

    /* walk the ready queue */
    for (pid = iq_query_first(&lev->ready); pid != NIL;
         pid = iq_query_next(pid, &lev->ready))
        kern_printf("Pid: %d\t Name: %20s Status: %s\n", pid,
                    proc_table[pid].name,
                    RR2_status_to_a(proc_table[pid].status));

    /* then every task of this level that is neither ready nor free */
    for (pid = 0; pid < MAX_PROC; pid++)
        if (proc_table[pid].task_level == l &&
            proc_table[pid].status != RR2_READY &&
            proc_table[pid].status != FREE)
            kern_printf("Pid: %d\t Name: %20s Status: %s\n", pid,
                        proc_table[pid].name,
                        RR2_status_to_a(proc_table[pid].status));

}
 
 
/* This is not efficient but very fair :-)
The need of all this stuff is because if a task execute a long time
due to (shadow!) priority inheritance, then the task shall go to the
tail of the queue many times... */
/* Pick the next task: the first queued task whose slice is not exhausted.
   Tasks found with avail_time <= 0 get their slice refilled and are rotated
   to the tail before looking again — the rotation order is what implements
   the round-robin fairness described in the comment above. */
static PID RR2_level_scheduler(LEVEL l)
{
    RR2_level_des *lev = (RR2_level_des *)(level_table[l]);

    PID p;

    for (;;) {
        p = iq_query_first(&lev->ready);
        /* empty queue: nothing to schedule */
        if (p == -1)
            return p;

        if (proc_table[p].avail_time <= 0) {
            /* slice exhausted: refill and move to the back of the queue */
            proc_table[p].avail_time += proc_table[p].wcet;
            iq_extract(p, &lev->ready);
            iq_insertlast(p, &lev->ready);
        }
        else
            return p;
    }
}
 
static int RR2_level_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
    /* the RR2 level always guarantee... the function is defined because
       there can be an aperiodic server at a level with less priority than
       the RR2 that need guarantee (e.g., a TBS server) */
    return 1;
}
 
 
/* Set up the slice bookkeeping for a newly created RR2 task.
   The general task_create has already put the task in SLEEP; here we only
   pick the quantum (the model's own slice, or the level default) and the
   arrival-saving policy.  The wcet field doubles as the refill amount so
   that overruns are accounted for. */
static int RR2_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
    RR2_level_des *lev = (RR2_level_des *)(level_table[l]);
    NRT_TASK_MODEL *nrt = (NRT_TASK_MODEL *)m;

    /* per-task slice if provided, otherwise the level's default */
    proc_table[p].avail_time = nrt->slice ? nrt->slice : lev->slice;
    proc_table[p].wcet       = nrt->slice ? nrt->slice : lev->slice;
    proc_table[p].control   |= CONTROL_CAP;

    /* nact == -1 means "do not save pending activations" */
    lev->nact[p] = (nrt->arrivals == SAVE_ARRIVALS) ? 0 : -1;

    return 0; /* OK */
}
 
/* Nothing to release: RR2 adds no dynamically allocated per-task state.
   The task state is set to FREE by the generic task_create machinery. */
static void RR2_task_detach(LEVEL l, PID p)
{
    /* the RR2 level doesn't introduce any new field in the TASK_MODEL
       so, all detach stuffs are done by the task_create
       The task state is set at FREE by the general task_create */
}
 
/* A task chosen by RR2_level_scheduler is always eligible to run. */
static int RR2_task_eligible(LEVEL l, PID p)
{
    return 0; /* if the task p is chosen, it is always eligible */
}
 
/* Remove the dispatched task from the ready queue; the scheduler() has
   already set its state to EXE. */
static void RR2_task_dispatch(LEVEL l, PID p, int nostop)
{
    RR2_level_des *lev = (RR2_level_des *)(level_table[l]);

    /* the task state is set EXE by the scheduler()
       we extract the task from the ready queue
       NB: we can't assume that p is the first task in the queue!!! */
    iq_extract(p, &lev->ready);
}
 
/* Re-queue a preempted/descheduled task: at the tail with a refilled slice
   if the slice ran out, otherwise at the head so it resumes its remaining
   quantum. */
static void RR2_task_epilogue(LEVEL l, PID p)
{
    RR2_level_des *lev = (RR2_level_des *)(level_table[l]);

    /* check if the slice is finished and insert the task in the correct
       queue position */
    if (proc_table[p].avail_time <= 0) {
        proc_table[p].avail_time += proc_table[p].wcet;
        iq_insertlast(p, &lev->ready);
    }
    else
        /* avail_time is >0, so the running task has to run for another
           avail_time usec */
        iq_insertfirst(p, &lev->ready);

    proc_table[p].status = RR2_READY;
}
 
/* Activate task p: if it is not sleeping, record the activation (when the
   task saves arrivals); otherwise stamp the request time and append it to
   the ready queue. */
static void RR2_task_activate(LEVEL l, PID p)
{
    RR2_level_des *lev = (RR2_level_des *)(level_table[l]);

    /* Test if we are trying to activate a non sleeping task */
    /* save activation (only if needed... */
    if (proc_table[p].status != SLEEP) {
        if (lev->nact[p] != -1)
            lev->nact[p]++;
        return;
    }

    ll_gettime(TIME_EXACT, &proc_table[p].request_time);

    /* Insert task in the correct position */
    proc_table[p].status = RR2_READY;
    iq_insertlast(p, &lev->ready);
}
 
/* Re-insert a task into the ready queue (e.g. after a block release).
   Unlike RR2_task_activate, no state check is made and request_time is
   left untouched. */
static void RR2_task_insert(LEVEL l, PID p)
{
    RR2_level_des *lev = (RR2_level_des *)(level_table[l]);

    /* Similar to RR2_task_activate, but we don't check in what state
       the task is and we don't set the request_time */

    /* Insert task in the correct position */
    proc_table[p].status = RR2_READY;
    iq_insertlast(p, &lev->ready);
}
 
/* Intentionally empty: the task left the ready queue at dispatch time, the
   capacity event and task state are handled by the generic kernel. */
static void RR2_task_extract(LEVEL l, PID p)
{
    /* Extract the running task from the level
       . we have already extract it from the ready queue at the dispatch time.
       . the capacity event have to be removed by the generic kernel
       . the wcet don't need modification...
       . the state of the task is set by the calling function

       So, we do nothing!!!
    */
}
 
/* End of a job: if activations are pending, consume one and immediately
   restart the task at the head of the queue; otherwise put it to sleep. */
static void RR2_task_endcycle(LEVEL l, PID p)
{
    RR2_level_des *lev = (RR2_level_des *)(level_table[l]);

    if (lev->nact[p] > 0) {
        /* continue!!!! */
        ll_gettime(TIME_EXACT, &proc_table[p].request_time);
        lev->nact[p]--;
        iq_insertfirst(p, &lev->ready);
        proc_table[p].status = RR2_READY;
    }
    else
        proc_table[p].status = SLEEP;
}
 
/* Terminate task p: drop any saved activations and hand the process
   descriptor back to the global free queue. */
static void RR2_task_end(LEVEL l, PID p)
{
    RR2_level_des *rr2lev = (RR2_level_des *)(level_table[l]);

    /* stop saving activations for this pid */
    rr2lev->nact[p] = -1;

    /* recycle the descriptor */
    proc_table[p].status = FREE;
    iq_insertlast(p, &freedesc);
}
 
/* Put task p to sleep, discarding any saved activations (a -1 marker
   meaning "don't save arrivals" is preserved). */
static void RR2_task_sleep(LEVEL l, PID p)
{
    RR2_level_des *rr2lev = (RR2_level_des *)(level_table[l]);

    if (rr2lev->nact[p] >= 0)
        rr2lev->nact[p] = 0;

    proc_table[p].status = SLEEP;
}
 
/* RR2 never hosts guest tasks (see RR2_level_accept_guest_model), so every
   guest entry point raises XINVALID_GUEST on the current shadow task. */
static int RR2_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }

static void RR2_guest_detach(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RR2_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RR2_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RR2_guest_activate(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RR2_guest_insert(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RR2_guest_extract(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RR2_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RR2_guest_end(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RR2_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
/*+ This init function install the "main" task +*/
/* Runlevel-init hook: build an NRT model and create/activate the "Main"
   task at level l.
   FIX: the original printed a panic message when task_create failed
   (p == NIL) but then fell through and called RR2_task_activate(lev, NIL),
   indexing proc_table with -1.  Bail out instead. */
static void RR2_call_main(void *l)
{
    LEVEL lev;
    PID p;
    NRT_TASK_MODEL m;
    void *mb;

    lev = (LEVEL)l;

    nrt_task_default_model(m);
    nrt_task_def_level(m, lev);   /* make sure the task arrives
                                     at the correct level */

    mb = ((RR2_level_des *)level_table[lev])->multiboot;
    nrt_task_def_arg(m, mb);
    nrt_task_def_usemath(m);
    nrt_task_def_nokill(m);
    nrt_task_def_ctrl_jet(m);

    p = task_create("Main", __init__, (TASK_MODEL *)&m, NULL);

    if (p == NIL) {
        printk("\nPanic!!! can't create main task...\n");
        return;   /* FIX: do not activate a nonexistent task */
    }

    RR2_task_activate(lev, p);
}
 
 
/*+ Registration function:
TIME slice the slice for the Round Robin queue
int createmain 1 if the level creates the main task 0 otherwise
struct multiboot_info *mb used if createmain specified +*/
/* Register an RR2 scheduling level.
   slice      - round-robin quantum, clamped to [RR2_MINIMUM_SLICE,
                RR2_MAXIMUM_SLICE]
   createmain - when nonzero, schedule RR2_call_main at RUNLEVEL_INIT to
                create the "Main" task on this level
   mb         - multiboot info forwarded to the main task if created */
void RR2_register_level(TIME slice,
                        int createmain,
                        struct multiboot_info *mb)
{
    LEVEL l;            /* the level that we register */
    RR2_level_des *lev; /* for readableness only */
    PID i;

    printk("RR2_register_level\n");

    /* request an entry in the level_table */
    l = level_alloc_descriptor();

    /* alloc the space needed for the RR2_level_des */
    lev = (RR2_level_des *)kern_alloc(sizeof(RR2_level_des));

    printk("    lev=%d\n", (int)lev);

    /* update the level_table with the new entry */
    level_table[l] = (level_des *)lev;

    /* fill the standard descriptor */
    strncpy(lev->l.level_name, RR2_LEVELNAME, MAX_LEVELNAME);
    lev->l.level_code    = RR2_LEVEL_CODE;
    lev->l.level_version = RR2_LEVEL_VERSION;

    lev->l.level_accept_task_model  = RR2_level_accept_task_model;
    lev->l.level_accept_guest_model = RR2_level_accept_guest_model;
    lev->l.level_status             = RR2_level_status;
    lev->l.level_scheduler          = RR2_level_scheduler;
    lev->l.level_guarantee          = RR2_level_guarantee;

    lev->l.task_create   = RR2_task_create;
    lev->l.task_detach   = RR2_task_detach;
    lev->l.task_eligible = RR2_task_eligible;
    lev->l.task_dispatch = RR2_task_dispatch;
    lev->l.task_epilogue = RR2_task_epilogue;
    lev->l.task_activate = RR2_task_activate;
    lev->l.task_insert   = RR2_task_insert;
    lev->l.task_extract  = RR2_task_extract;
    lev->l.task_endcycle = RR2_task_endcycle;
    lev->l.task_end      = RR2_task_end;
    lev->l.task_sleep    = RR2_task_sleep;

    lev->l.guest_create   = RR2_guest_create;
    lev->l.guest_detach   = RR2_guest_detach;
    lev->l.guest_dispatch = RR2_guest_dispatch;
    lev->l.guest_epilogue = RR2_guest_epilogue;
    lev->l.guest_activate = RR2_guest_activate;
    lev->l.guest_insert   = RR2_guest_insert;
    lev->l.guest_extract  = RR2_guest_extract;
    lev->l.guest_endcycle = RR2_guest_endcycle;
    lev->l.guest_end      = RR2_guest_end;
    lev->l.guest_sleep    = RR2_guest_sleep;

    /* fill the RR2 descriptor part: no saved activations anywhere yet */
    for (i = 0; i < MAX_PROC; i++)
        lev->nact[i] = -1;

    iq_init(&lev->ready, &freedesc, 0);

    /* clamp the quantum to the supported range */
    if (slice < RR2_MINIMUM_SLICE) slice = RR2_MINIMUM_SLICE;
    if (slice > RR2_MAXIMUM_SLICE) slice = RR2_MAXIMUM_SLICE;
    lev->slice = slice;

    lev->multiboot = mb;

    if (createmain)
        sys_atrunlevel(RR2_call_main, (void *) l, RUNLEVEL_INIT);
}
 
 
/shark/tags/rel_0_2/kernel/modules/ds.c
0,0 → 1,652
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: ds.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the aperiodic server DS (Deferrable Server)
 
This module is directly derived from the Polling server one.
All the notes writed for the PS are valid for the DS.
 
The difference between DS and PS is that when there are not task to
schedule the capacity is not reset to 0...
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/ds.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/*+ Status used in the level +*/
#define DS_WAIT APER_STATUS_BASE /*+ waiting the service +*/
 
/*+ the level redefinition for the Total Bandwidth Server level +*/
/*+ the level redefinition for the Deferrable Server level +*/
typedef struct {
    level_des l;          /*+ the standard level descriptor +*/

    /* The wcet are stored in the task descriptor's priority
       field, so no other fields are needed */

    int nact[MAX_PROC];   /*+ number of pending activations; -1 means
                              "do not save arrivals" +*/

    struct timespec lastdline; /*+ the last deadline assigned to
                                   a DS task +*/

    int Cs;               /*+ server capacity (usec per period) +*/
    int availCs;          /*+ server avail time; may go negative when a
                              shadow chain overruns the budget +*/

    IQUEUE wait;          /*+ the wait queue of the DS +*/
    PID activated;        /*+ the task inserted in another queue +*/

    int flags;            /*+ the init flags... +*/

    bandwidth_t U;        /*+ the used bandwidth by the server +*/
    int period;           /*+ server replenishment period (usec) +*/

    LEVEL scheduling_level; /*+ master level the server's task is
                                inserted into as a guest +*/

} DS_level_des;
 
/* This static function activates the task pointed by lev->activated) */
/* Insert the task pointed by lev->activated into the master scheduling
   level as a guest job with the server's current deadline and period. */
static __inline__ void DS_activation(DS_level_des *lev)
{
    PID p;           /* for readableness */
    JOB_TASK_MODEL j; /* the guest model */
    LEVEL m;         /* the master level... only for readableness */

    p = lev->activated;
    m = lev->scheduling_level;
    job_task_default_model(j, lev->lastdline);
    job_task_def_period(j, lev->period);
    level_table[m]->guest_create(m, p, (TASK_MODEL *)&j);
    level_table[m]->guest_activate(m, p);
    // kern_printf("(%d %d)",lev->lastdline.tv_sec,lev->lastdline.tv_nsec);
}
 
/* Periodic replenishment event: advance the server deadline by one period,
   refill the capacity (deferring any overrun debt), possibly activate the
   first waiting task, and re-arm itself for the next period. */
static void DS_deadline_timer(void *a)
{
    DS_level_des *lev = (DS_level_des *)(level_table[(LEVEL)a]);

    ADDUSEC2TIMESPEC(lev->period, &lev->lastdline);

    // kern_printf("(%d:%d %d)",lev->lastdline.tv_sec,lev->lastdline.tv_nsec, lev->period);
    /* full refill, unless an overrun (availCs < 0) must be paid back */
    if (lev->availCs >= 0)
        lev->availCs = lev->Cs;
    else
        lev->availCs += lev->Cs;

    /* availCs may be <0 because a task executed via a shadow for a long time.
       lev->activated == NIL only if the previous task finished and there
       was no other task to put in the ready queue
       ... we are now activating the next task */
    if (lev->availCs > 0 && lev->activated == NIL) {
        if (iq_query_first(&lev->wait) != NIL) {
            lev->activated = iq_getfirst(&lev->wait);
            DS_activation(lev);
            event_need_reschedule();
        }
    }

    /* re-arm for the next replenishment */
    kern_event_post(&lev->lastdline, DS_deadline_timer, a);
    // kern_printf("!");
}
 
/* Translate a task status code into a printable string; codes below
   MODULE_STATUS_BASE are delegated to the generic status_to_a(). */
static char *DS_status_to_a(WORD status)
{
    if (status < MODULE_STATUS_BASE)
        return status_to_a(status);

    return (status == DS_WAIT) ? "DS_Wait" : "DS_Unknown";
}
 
 
/* Accept only APERIODIC soft task models, generic or addressed to this
   level.  Returns 0 on acceptance, -1 otherwise. */
static int DS_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
    SOFT_TASK_MODEL *s;

    if (m->pclass != SOFT_PCLASS && m->pclass != (SOFT_PCLASS | l))
        return -1;

    s = (SOFT_TASK_MODEL *)m;
    return (s->periodicity == APERIODIC) ? 0 : -1;
}
 
/* The DS level hosts no guest tasks: reject every guest model. */
static int DS_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
    return -1;
}
 
/* Render a boolean flag as a fixed-width "On "/"Off" label for the
   status printout below. */
static char *onoff(int i)
{
    return i ? "On " : "Off";
}
 
/* Debug dump: guarantee mode, consumed bandwidth, the currently activated
   task (if any) and every task parked in the wait queue. */
static void DS_level_status(LEVEL l)
{
    DS_level_des *lev = (DS_level_des *)(level_table[l]);
    PID p = iq_query_first(&lev->wait);

    kern_printf("On-line guarantee : %s\n",
                onoff(lev->flags & DS_ENABLE_GUARANTEE_EDF ||
                      lev->flags & DS_ENABLE_GUARANTEE_RM ));
    kern_printf("Used Bandwidth    : %u/%u\n",
                lev->U, MAX_BANDWIDTH);

    /* the task currently running inside the master level, if any */
    if (lev->activated != -1)
        kern_printf("Activated: Pid: %2d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n",
                    lev->activated,
                    proc_table[lev->activated].name,
                    iq_query_timespec(lev->activated, &lev->wait)->tv_sec,
                    iq_query_timespec(lev->activated, &lev->wait)->tv_nsec,
                    lev->nact[lev->activated],
                    DS_status_to_a(proc_table[lev->activated].status));

    /* then the wait queue */
    while (p != NIL) {
        kern_printf("Pid: %2d Name: %10s Stat: %s\n",
                    p,
                    proc_table[p].name,
                    DS_status_to_a(proc_table[p].status));
        p = iq_query_next(p, &lev->wait);
    }
}
 
static PID DS_level_scheduler(LEVEL l)
{
    /* the DS don't schedule anything...
       it's an EDF level or similar that do it! */
    return NIL;
}
 
/* Background-mode scheduler: the DS grabs spare (background) CPU time for
   its aperiodic activities.  Marks the level as running in background and
   offers the first waiting task, unless background service is blocked. */
static PID DS_level_schedulerbackground(LEVEL l)
{
    DS_level_des *ds = (DS_level_des *)(level_table[l]);

    /* remember that any execution from here on is background time */
    ds->flags |= DS_BACKGROUND;

    return (ds->flags & DS_BACKGROUND_BLOCK) ? NIL
                                             : iq_query_first(&ds->wait);
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
/* On-line EDF guarantee: succeed (and consume lev->U from the free
   bandwidth) iff enough bandwidth remains. */
static int DS_level_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth)
{
    DS_level_des *ds = (DS_level_des *)(level_table[l]);

    if (*freebandwidth < ds->U)
        return 0;

    *freebandwidth -= ds->U;
    return 1;
}
 
/* On-line RM guarantee: like the EDF variant but requires an extra
   RM_MINFREEBANDWIDTH of slack (strictly) before accepting. */
static int DS_level_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth)
{
    DS_level_des *ds = (DS_level_des *)(level_table[l]);

    if (*freebandwidth <= ds->U + RM_MINFREEBANDWIDTH)
        return 0;

    *freebandwidth -= ds->U;
    return 1;
}
 
/* Record the arrival-saving policy for a newly created DS task.  By the
   time this runs the model has already been validated by
   DS_level_accept_task_model, so it is a SOFT_TASK_MODEL. */
static int DS_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
    DS_level_des *ds = (DS_level_des *)(level_table[l]);
    SOFT_TASK_MODEL *soft = (SOFT_TASK_MODEL *)m;

    /* nact == -1 means "do not save pending activations" */
    ds->nact[p] = (soft->arrivals == SAVE_ARRIVALS) ? 0 : -1;

    return 0; /* OK, also if the task cannot be guaranteed... */
}
 
/* Nothing to release: DS adds no dynamically allocated per-task state. */
static void DS_task_detach(LEVEL l, PID p)
{
    /* the DS level doesn't introduce any dinamic allocated new field. */
}
 
/* A task chosen for the server is always eligible to run. */
static int DS_task_eligible(LEVEL l, PID p)
{
    return 0; /* if the task p is chosen, it is always eligible */
}
 
/* Dispatch task p: either pull it from the DS wait queue (background
   service) or forward the dispatch to the master level where it lives as a
   guest.  Unless nostop, arm the capacity timer that enforces the server
   budget. */
static void DS_task_dispatch(LEVEL l, PID p, int nostop)
{
    DS_level_des *lev = (DS_level_des *)(level_table[l]);
    struct timespec ty;

    // if (nostop) kern_printf("NOSTOP!!!!!!!!!!!!");
    /* there is at least one task ready inserted in an EDF or similar
       level.  Note that we can't check the status because the scheduler
       sets it to exe before calling task_dispatch; we have to check
       lev->activated != p instead */
    if (lev->activated != p) {
        iq_extract(p, &lev->wait);
        //kern_printf("#%d#",p);
    }
    else {
        //if (nostop) kern_printf("(gd status=%d)",proc_table[p].status);
        level_table[ lev->scheduling_level ]->
            guest_dispatch(lev->scheduling_level, p, nostop);
    }

    /* set the capacity timer: fire when the remaining budget runs out */
    if (!nostop) {
        TIMESPEC_ASSIGN(&ty, &schedule_time);
        ADDUSEC2TIMESPEC(lev->availCs, &ty);
        cap_timer = kern_event_post(&ty, capacity_timer, NULL);
    }

    // kern_printf("(disp %d %d)",ty.tv_sec, ty.tv_nsec);
}
 
/* Task p stopped running: charge the elapsed time to the server budget
   (unless it ran in background time), then either suspend the task in the
   DS wait queue (budget exhausted) or hand it back to the master level /
   wait queue for later resumption. */
static void DS_task_epilogue(LEVEL l, PID p)
{
    DS_level_des *lev = (DS_level_des *)(level_table[l]);
    struct timespec ty;
    TIME tx;

    /* update the server capacity: background execution is free */
    if (lev->flags & DS_BACKGROUND)
        lev->flags &= ~DS_BACKGROUND;
    else {
        SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
        tx = TIMESPEC2USEC(&ty);
        lev->availCs -= tx;
    }

    // kern_printf("(epil %d %d)",lev->availCs, proc_table[p].avail_time);

    /* check if the server capacity is finished... */
    if (lev->availCs < 0) {
        // kern_printf("(epil Cs%d %d:%d act%d p%d)",
        //             lev->availCs,proc_table[p].timespec_priority.tv_sec,
        //             proc_table[p].timespec_priority.tv_nsec,
        //             lev->activated,p);
        /* the server slice has finished... do the task_end!!!
           a first version of the module used the task_endcycle, but it was
           not conceptually correct because the task didn't stop because it
           finished all the work but because the server didn't have budget!
           So, if the task_endcycle is called, the task remains in the
           master level, and we can't wake it up if, for example, another
           task points its shadow to it!!! */
        if (lev->activated == p)
            level_table[ lev->scheduling_level ]->
                guest_end(lev->scheduling_level, p);
        iq_insertfirst(p, &lev->wait);
        proc_table[p].status = DS_WAIT;
        lev->activated = NIL;
    }
    else
        /* the task has been preempted. it returns into the ready queue or
           to the wait queue by calling the guest_epilogue... */
        if (lev->activated == p) {  //kern_printf("Û1");
            level_table[ lev->scheduling_level ]->
                guest_epilogue(lev->scheduling_level, p);
        } else { //kern_printf("Û2");
            iq_insertfirst(p, &lev->wait);
            proc_table[p].status = DS_WAIT;
        }
}
 
/* Activate hook.  If p is already served (activated or queued), the
   request is only counted in nact[] when arrivals are saved.  A
   sleeping task starts at once when the server is idle and still has
   budget, otherwise it is appended to the wait queue.  Any other
   status is rejected (diagnostic print). */
static void DS_task_activate(LEVEL l, PID p)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);

  if (lev->activated == p || proc_table[p].status == DS_WAIT) {
    /* already active: remember the arrival only if requested
       (nact[p] == -1 means arrivals are not saved) */
    if (lev->nact[p] != -1)
      lev->nact[p]++;
  }
  else if (proc_table[p].status == SLEEP) {
    ll_gettime(TIME_EXACT, &proc_table[p].request_time);

    if (lev->activated == NIL && lev->availCs > 0) {
      /* the server is idle and has budget: serve p immediately */
      lev->activated = p;
      DS_activation(lev);
    }
    else {
      iq_insertlast(p, &lev->wait);
      proc_table[p].status = DS_WAIT;
    }
  }
  else
  { kern_printf("DS_REJ%d %d %d %d ",p, proc_table[p].status, lev->activated, lev->wait.first);
    return; }

}
 
/* Insert hook: re-admit task p after a blocking primitive.  The task
   always goes back to the private wait queue because the server
   capacity is 0 at this point (nobody executed through the DS yet). */
static void DS_task_insert(LEVEL l, PID p)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);

  lev->flags &= ~DS_BACKGROUND_BLOCK;

  /* consistency fix: use NIL, as everywhere else in this module,
     instead of the raw constant -1 */
  lev->activated = NIL;

  /* when we reinsert the task into the system, the server capacity
     is always 0 because nobody executes with the DS before... */
  iq_insertfirst(p, &lev->wait);
  proc_table[p].status = DS_WAIT;
}
 
/* Extract hook: task p blocks on a synchronization primitive (or is
   killed while blocked).  The residual budget is dropped, background
   service is disabled until reinsertion, and the guest is removed from
   the master level if p was the activated task. */
static void DS_task_extract(LEVEL l, PID p)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);

  /* update the server capacity: drop whatever budget was left */
  lev->availCs = 0;

  lev->flags |= DS_BACKGROUND_BLOCK;

  if (lev->activated == p)
    level_table[ lev->scheduling_level ]->
      guest_end(lev->scheduling_level,p);
}
 
/* Endcycle hook: the current activation of p is finished.  The elapsed
   slice is charged to the budget; a saved pending activation (if any)
   re-queues p, otherwise p goes to sleep.  Finally the first waiting
   task (possibly p itself) becomes the new activated task. */
static void DS_task_endcycle(LEVEL l, PID p)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);
  struct timespec ty;
  TIME tx;

  /* update the server capacity (no charge for background execution) */
  if (lev->flags & DS_BACKGROUND)
    lev->flags &= ~DS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
  }

  /* remove p either from the master level or from the wait queue */
  if (lev->activated == p)
    level_table[ lev->scheduling_level ]->
      guest_end(lev->scheduling_level,p);
  else
    iq_extract(p, &lev->wait);

  if (lev->nact[p] > 0)
  {
    /* consume one saved activation and queue p again */
    lev->nact[p]--;
    iq_insertlast(p, &lev->wait);
    proc_table[p].status = DS_WAIT;
  }
  else
    proc_table[p].status = SLEEP;
  /* promote the next waiting task, if any */
  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated != NIL)
    DS_activation(lev);
}
 
/* End hook: task p terminates (or is killed).  The elapsed slice is
   charged to the budget, the guest is removed from the master level if
   p was the activated one, the descriptor is freed, and the next
   waiting task (if any) is promoted. */
static void DS_task_end(LEVEL l, PID p)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);
  struct timespec ty;
  TIME tx;

  /* update the server capacity (no charge for background execution) */
  if (lev->flags & DS_BACKGROUND)
    lev->flags &= ~DS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
  }

  if (lev->activated == p)
    level_table[ lev->scheduling_level ]->
      guest_end(lev->scheduling_level,p);

  /* give the task descriptor back to the kernel free queue */
  proc_table[p].status = FREE;
  iq_insertfirst(p,&freedesc);

  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated != NIL)
    DS_activation(lev);
}
 
/* Sleep hook: p voluntarily suspends.  The elapsed slice is charged to
   the budget, pending activations are forgotten, p is removed from
   wherever it is queued and put to SLEEP, and the next waiting task is
   promoted. */
static void DS_task_sleep(LEVEL l, PID p)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);
  struct timespec ty;
  TIME tx;

  /* update the server capacity (no charge for background execution) */
  if (lev->flags & DS_BACKGROUND)
    lev->flags &= ~DS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
  }

  /* the sleep forgets any saved activation (but keep -1 = "not saved") */
  if (lev->nact[p] >= 0) lev->nact[p] = 0;

  if (lev->activated == p)
    level_table[ lev->scheduling_level ]->
      guest_end(lev->scheduling_level,p);
  else
    iq_extract(p, &lev->wait);

  proc_table[p].status = SLEEP;

  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated != NIL)
    DS_activation(lev);
}
 
 
/* Guest calls are not supported by the DS level: DS tasks are hosted
   as guests of the master level, never the other way around.  Any
   invocation of these entry points is a kernel bug and raises
   XINVALID_GUEST on the running task. */

static int DS_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }

static void DS_guest_detach(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void DS_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void DS_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void DS_guest_activate(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void DS_guest_insert(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void DS_guest_extract(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void DS_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void DS_guest_end(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void DS_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
 
/*+ This init function installs the DS deadline (replenishment) timer:
    it posts the first DS_deadline_timer event one server period from
    now.  Run at RUNLEVEL_INIT (see DS_register_level). +*/
static void DS_dline_install(void *l)
{
  DS_level_des *lev = (DS_level_des *)(level_table[(LEVEL)l]);

  ll_gettime(TIME_EXACT,&lev->lastdline);
  ADDUSEC2TIMESPEC(lev->period, &lev->lastdline);

  kern_event_post(&lev->lastdline, DS_deadline_timer, l);
}
 
 
 
/*+ Registration function: creates a Deferrable Server level with
    capacity Cs (usec) replenished every per (usec), hosted on the
    master level.
    int flags the init flags ... see DS.h +*/
void DS_register_level(int flags, LEVEL master, int Cs, int per)
{
  LEVEL l; /* the level that we register */
  DS_level_des *lev; /* for readableness only */
  PID i; /* a counter */

  printk("DS_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor();

  printk(" alloco descrittore %d %d\n",l,(int)sizeof(DS_level_des));

  /* alloc the space needed for the DS_level_des */
  lev = (DS_level_des *)kern_alloc(sizeof(DS_level_des));

  printk(" lev=%d\n",(int)lev);

  /* update the level_table with the new entry */
  level_table[l] = (level_des *)lev;

  /* fill the standard descriptor */
  strncpy(lev->l.level_name, DS_LEVELNAME, MAX_LEVELNAME);
  lev->l.level_code = DS_LEVEL_CODE;
  lev->l.level_version = DS_LEVEL_VERSION;

  lev->l.level_accept_task_model = DS_level_accept_task_model;
  lev->l.level_accept_guest_model = DS_level_accept_guest_model;
  lev->l.level_status = DS_level_status;

  /* optional background service */
  if (flags & DS_ENABLE_BACKGROUND)
    lev->l.level_scheduler = DS_level_schedulerbackground;
  else
    lev->l.level_scheduler = DS_level_scheduler;

  /* optional on-line guarantee: EDF-based, RM-based or none */
  if (flags & DS_ENABLE_GUARANTEE_EDF)
    lev->l.level_guarantee = DS_level_guaranteeEDF;
  else if (flags & DS_ENABLE_GUARANTEE_RM)
    lev->l.level_guarantee = DS_level_guaranteeRM;
  else
    lev->l.level_guarantee = NULL;

  lev->l.task_create = DS_task_create;
  lev->l.task_detach = DS_task_detach;
  lev->l.task_eligible = DS_task_eligible;
  lev->l.task_dispatch = DS_task_dispatch;
  lev->l.task_epilogue = DS_task_epilogue;
  lev->l.task_activate = DS_task_activate;
  lev->l.task_insert = DS_task_insert;
  lev->l.task_extract = DS_task_extract;
  lev->l.task_endcycle = DS_task_endcycle;
  lev->l.task_end = DS_task_end;
  lev->l.task_sleep = DS_task_sleep;

  lev->l.guest_create = DS_guest_create;
  lev->l.guest_detach = DS_guest_detach;
  lev->l.guest_dispatch = DS_guest_dispatch;
  lev->l.guest_epilogue = DS_guest_epilogue;
  lev->l.guest_activate = DS_guest_activate;
  lev->l.guest_insert = DS_guest_insert;
  lev->l.guest_extract = DS_guest_extract;
  lev->l.guest_endcycle = DS_guest_endcycle;
  lev->l.guest_end = DS_guest_end;
  lev->l.guest_sleep = DS_guest_sleep;

  /* fill the DS descriptor part */

  /* -1 = pending activations are not saved (see DS_task_create) */
  for (i=0; i<MAX_PROC; i++)
    lev->nact[i] = -1;

  lev->Cs = Cs;
  lev->availCs = 0;  /* initial budget is 0 until the first replenishment */

  lev->period = per;

  iq_init(&lev->wait, &freedesc, 0);
  lev->activated = NIL;

  /* server bandwidth = Cs / per */
  lev->U = (MAX_BANDWIDTH / per) * Cs;

  lev->scheduling_level = master;

  lev->flags = flags & 0x07;

  /* the replenishment timer is installed when the system starts */
  sys_atrunlevel(DS_dline_install,(void *) l, RUNLEVEL_INIT);
}
 
/* Return the bandwidth reserved by the DS level l, or 0 when l is not
   a DS level (code/version sanity check). */
bandwidth_t DS_usedbandwidth(LEVEL l)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);

  if (lev->l.level_code != DS_LEVEL_CODE ||
      lev->l.level_version != DS_LEVEL_VERSION)
    return 0;

  return lev->U;
}
 
/shark/tags/rel_0_2/kernel/modules/cbs.c
0,0 → 1,736
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: cbs.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the aperiodic server CBS (Constant Bandwidth Server)
 
Read CBS.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/cbs.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <kernel/trace.h>
 
/*+ 4 debug purposes +*/
#undef CBS_TEST
#undef CBS_COUNTER
 
#ifdef TESTG
#include "drivers/glib.h"
TIME x,oldx;
extern TIME starttime;
#endif
 
 
/*+ Status used in the level +*/
#define CBS_IDLE APER_STATUS_BASE /*+ waiting the activation +*/
#define CBS_ZOMBIE APER_STATUS_BASE+1 /*+ waiting the period end +*/
 
/*+ task flags +*/
#define CBS_SAVE_ARRIVALS 1
#define CBS_APERIODIC 2
 
/*+ the level redefinition for the Constant Bandwidth Server level +*/
typedef struct {
  level_des l; /*+ the standard level descriptor +*/

  /* The wcet are stored in the task descriptor, but we need
     an array for the deadlines. We can't use the timespec_priority
     field because it is used by the master level!!!...
     Notice that however the use of the timespec_priority field
     does not cause any problem... */

  struct timespec cbs_dline[MAX_PROC]; /*+ CBS deadlines +*/

  TIME period[MAX_PROC]; /*+ CBS activation period +*/

  struct timespec reactivation_time[MAX_PROC];
  /*+ the time at which the reactivation timer is posted +*/
  int reactivation_timer[MAX_PROC];
  /*+ the reactivation timer (also reused to hold the zombie event,
      see CBS_task_end) +*/

  int nact[MAX_PROC]; /*+ number of pending activations +*/

  BYTE flag[MAX_PROC]; /*+ task flags: CBS_SAVE_ARRIVALS, CBS_APERIODIC +*/

  int flags; /*+ the init flags... +*/

  bandwidth_t U; /*+ the used bandwidth by the server +*/

  LEVEL scheduling_level; /*+ master level hosting the CBS guests +*/

} CBS_level_des;
 
#ifdef CBS_COUNTER
int cbs_counter=0;
int cbs_counter2=0;
#endif
 
 
/* Apply the CBS admission rules at (re)activation time: if the current
   (deadline, budget) pair cannot be kept without exceeding the allowed
   bandwidth (rule 7 of the CBS paper), assign a fresh deadline
   acttime + period and recharge the budget to wcet; then insert p as a
   guest in the master level with the resulting deadline. */
static void CBS_activation(CBS_level_des *lev,
                           PID p,
                           struct timespec *acttime)
{
  JOB_TASK_MODEL job;

  /* we have to check if the deadline and the wcet are correct before
     activating a new task or an old task... */

  /* check 1: if the deadline is before than the actual scheduling time */

  /* check 2: if ( avail_time >= (cbs_dline - acttime)* (wcet/period) )
     (rule 7 in the CBS article!) */
  TIME t;
  struct timespec t2,t3;

  /* t3 = time needed to consume the residual budget at the reserved rate */
  t = (lev->period[p] * proc_table[p].avail_time) / proc_table[p].wcet;
  t3.tv_sec = t / 1000000;
  t3.tv_nsec = (t % 1000000) * 1000;

  SUBTIMESPEC(&lev->cbs_dline[p], acttime, &t2);

  if (/* 1 */ TIMESPEC_A_LT_B(&lev->cbs_dline[p], acttime) ||
      /* 2 */ TIMESPEC_A_GT_B(&t3, &t2) ) {
    /* if (TIMESPEC_A_LT_B(&lev->cbs_dline[p], acttime) )
       kern_printf("$");
       else
       kern_printf("(Ûdline%d.%d act%d.%d wcet%d per%d avail%dÛ)",
       lev->cbs_dline[p].tv_sec,lev->cbs_dline[p].tv_nsec/1000,
       acttime->tv_sec, acttime->tv_nsec/1000,
       proc_table[p].wcet, lev->period[p], proc_table[p].avail_time);
    */ /* we modify the deadline ... */
    TIMESPEC_ASSIGN(&lev->cbs_dline[p], acttime);
    ADDUSEC2TIMESPEC(lev->period[p], &lev->cbs_dline[p]);

    /* and the capacity */
    proc_table[p].avail_time = proc_table[p].wcet;
  }

#ifdef TESTG
  if (starttime && p == 3) {
    oldx = x;
    x = ((lev->cbs_dline[p].tv_sec*1000000+lev->cbs_dline[p].tv_nsec/1000)/5000 - starttime) + 20;
    // kern_printf("(a%d)",lev->cbs_dline[p].tv_sec*1000000+lev->cbs_dline[p].tv_nsec/1000);
    if (oldx > x) sys_end();
    if (x<640)
      grx_plot(x, 15, 8);
  }
#endif

  /* and, finally, we reinsert the task in the master level */
  job_task_default_model(job, lev->cbs_dline[p]);
  job_task_def_noexc(job);
  level_table[ lev->scheduling_level ]->
    guest_create(lev->scheduling_level, p, (TASK_MODEL *)&job);
  level_table[ lev->scheduling_level ]->
    guest_activate(lev->scheduling_level, p);

}
 
 
/* Human-readable name for a task status; statuses below the module
   base are delegated to the generic kernel decoder. */
static char *CBS_status_to_a(WORD status)
{
  if (status < MODULE_STATUS_BASE)
    return status_to_a(status);

  if (status == CBS_IDLE)
    return "CBS_Idle";
  if (status == CBS_ZOMBIE)
    return "CBS_Zombie";
  return "CBS_Unknown";
}
 
/* CBS rule 4: while the budget is depleted, postpone the deadline by
   one period and recharge the budget by one wcet. */
static void CBS_avail_time_check(CBS_level_des *lev, PID p)
{
  /* there is a while because if the wcet is << than the system tick
     we need to postpone the deadline many times */
  while (proc_table[p].avail_time <= 0) {
    ADDUSEC2TIMESPEC(lev->period[p], &lev->cbs_dline[p]);
    proc_table[p].avail_time += proc_table[p].wcet;

#ifdef TESTG
    if (starttime && p == 3) {
      oldx = x;
      x = ((lev->cbs_dline[p].tv_sec*1000000+lev->cbs_dline[p].tv_nsec/1000)/5000 - starttime) + 20;
      // kern_printf("(e%d avail%d)",lev->cbs_dline[p].tv_sec*1000000+lev->cbs_dline[p].tv_nsec/1000,proc_table[p].avail_time);
      if (oldx > x) sys_end();
      if (x<640)
        grx_plot(x, 15, 2);
    }
#endif
  }
}
 
 
/* this is the periodic reactivation of the task... it is posted only
   if the task is a periodic task.  If the task completed its previous
   instance (CBS_IDLE) it is reactivated now; otherwise the arrival is
   saved in nact[] when CBS_SAVE_ARRIVALS is set.  The event re-posts
   itself one period ahead. */
static void CBS_timer_reactivate(void *par)
{
  PID p = (PID) par;
  CBS_level_des *lev;

  lev = (CBS_level_des *)level_table[proc_table[p].task_level];

#ifdef CBS_COUNTER
  if (p==5) cbs_counter++;
#endif

  if (proc_table[p].status == CBS_IDLE) {
    /* the task has finished the current activation and must be
       reactivated */
    CBS_activation(lev,p,&lev->reactivation_time[p]);

    event_need_reschedule();
  }
  else if (lev->flag[p] & CBS_SAVE_ARRIVALS)
    /* the task has not completed the current activation, so we save
       the activation incrementing nact... */
    lev->nact[p]++;

  /* repost the event at the next period end... */
  ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
  lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
                                               CBS_timer_reactivate,
                                               (void *)p);
#ifdef CBS_COUNTER
  if (p==5) cbs_counter2++;
#endif
  /* tracer stuff */
  trc_logevent(TRC_INTACTIVATION,&p);

}
 
/*+ this function is called when a killed or ended task reaches the
    period end: only then its descriptor and bandwidth can be safely
    released +*/
static void CBS_timer_zombie(void *par)
{
  PID p = (PID) par;
  CBS_level_des *lev;

  lev = (CBS_level_des *)level_table[proc_table[p].task_level];

  /* we finally give the task descriptor back to the free queue */
  proc_table[p].status = FREE;
  iq_insertfirst(p,&freedesc);

  /* and free the allocated bandwidth */
  lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet;

}
 
 
/* Accept only SOFT task models (optionally bound to this level) that
   declare both a mean execution time and a period. */
static int CBS_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
  SOFT_TASK_MODEL *s;

  if (m->pclass != SOFT_PCLASS && m->pclass != (SOFT_PCLASS | l))
    return -1;

  s = (SOFT_TASK_MODEL *)m;
  return (s->met && s->period) ? 0 : -1;
}
 
/* The CBS level never hosts guest tasks. */
static int CBS_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
  return -1;
}
 
/* Map a boolean flag to a fixed-width "On "/"Off" label. */
static char *onoff(int i)
{
  return i ? "On " : "Off";
}
 
/* Console dump of the level state: guarantee flag, used bandwidth and
   one line (period, deadline, status) per live task of this level. */
static void CBS_level_status(LEVEL l)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);
  PID p;

  kern_printf("On-line guarantee : %s\n",
              onoff(lev->flags & CBS_ENABLE_GUARANTEE));
  kern_printf("Used Bandwidth    : %u/%u\n",
              lev->U, MAX_BANDWIDTH);

  for (p=0; p<MAX_PROC; p++)
    if (proc_table[p].task_level == l && proc_table[p].status != FREE )
      kern_printf("Pid: %2d Name: %10s Period: %9ld Dline: %9ld.%6ld Stat: %s\n",
                  p,
                  proc_table[p].name,
                  lev->period[p],
                  lev->cbs_dline[p].tv_sec,
                  lev->cbs_dline[p].tv_nsec/1000,
                  CBS_status_to_a(proc_table[p].status));
}
 
/* The CBS module never schedules directly: its tasks run as guests of
   the master (EDF-like) level, which selects them. */
static PID CBS_level_scheduler(LEVEL l)
{
  return NIL;
}
 
/* On-line guarantee (installed only when CBS_ENABLE_GUARANTEE is set):
   a failed past admission zeroes the free bandwidth; otherwise the
   server's bandwidth U is subtracted when it fits. */
static int CBS_level_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);

  if (lev->flags & CBS_FAILED_GUARANTEE) {
    *freebandwidth = 0;
    return 0;
  }

  if (*freebandwidth < lev->U)
    return 0;

  *freebandwidth -= lev->U;
  return 1;
}
 
/* Create hook: set up the per-task CBS state (budget = met, deadline
   reset, periodicity/arrival flags) and account the task's bandwidth
   met/period into the server, flagging failure on overflow. */
static int CBS_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);

  /* if the CBS_task_create is called, then the pclass must be a
     valid pclass. */
  SOFT_TASK_MODEL *soft = (SOFT_TASK_MODEL *)m;

  /* Enable wcet check */
  proc_table[p].avail_time = soft->met;
  proc_table[p].wcet       = soft->met;
  proc_table[p].control   |= CONTROL_CAP;

  lev->nact[p] = 0;
  lev->period[p] = soft->period;
  NULL_TIMESPEC(&lev->cbs_dline[p]);

  if (soft->periodicity == APERIODIC)
    lev->flag[p] = CBS_APERIODIC;
  else
    lev->flag[p] = 0;

  if (soft->arrivals == SAVE_ARRIVALS)
    lev->flag[p] |= CBS_SAVE_ARRIVALS;

  /* update the bandwidth... */
  if (lev->flags & CBS_ENABLE_GUARANTEE) {
    bandwidth_t b;
    b = (MAX_BANDWIDTH / soft->period) * soft->met;

    /* really update lev->U, checking an overflow... */
    if (MAX_BANDWIDTH - lev->U > b)
      lev->U += b;
    else
      /* The task can NOT be guaranteed (U>MAX_BANDWIDTH)...
         (see EDF.c) */
      lev->flags |= CBS_FAILED_GUARANTEE;
  }


  return 0; /* OK, also if the task cannot be guaranteed... */
}
 
/* Detach hook: the CBS level has no dynamically allocated per-task
   data; either clear the failed-guarantee flag (this task caused the
   overflow) or return the task's bandwidth to the level. */
static void CBS_task_detach(LEVEL l, PID p)
{
  /* the CBS level doesn't introduce any dynamically allocated new field.
     we have only to reset the NO_GUARANTEE FIELD and decrement the allocated
     bandwidth */

  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);

  if (lev->flags & CBS_FAILED_GUARANTEE)
    lev->flags &= ~CBS_FAILED_GUARANTEE;
  else
    lev->U -= (MAX_BANDWIDTH / lev->period[p]) * proc_table[p].wcet;
}
 
/* Eligibility hook: normally returns 0; when the cached deadline is
   already in the past (possible only with background scheduling, not
   covered by the CBS paper) the deadline is reassigned, the budget
   recharged, the guest re-created, and -1 is returned to force a
   rescheduling. */
static int CBS_task_eligible(LEVEL l, PID p)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);
  JOB_TASK_MODEL job;

  /* we have to check if the deadline and the wcet are correct...
     if the CBS level schedules in background with respect to others
     levels, there can be the case in witch a task is scheduled by
     schedule_time > CBS_deadline; in this case (not covered in the
     article because if there is only the standard scheduling policy
     this never apply) we reassign the deadline */

  if ( TIMESPEC_A_LT_B(&lev->cbs_dline[p], &schedule_time) ) {
    /* we kill the current activation */
    level_table[ lev->scheduling_level ]->
      guest_end(lev->scheduling_level, p);

    /* we modify the deadline ... */
    TIMESPEC_ASSIGN(&lev->cbs_dline[p], &schedule_time);
    ADDUSEC2TIMESPEC(lev->period[p], &lev->cbs_dline[p]);

    /* and the capacity */
    proc_table[p].avail_time = proc_table[p].wcet;
    /* and, finally, we reinsert the task in the master level */
    job_task_default_model(job, lev->cbs_dline[p]);
    job_task_def_noexc(job);
    level_table[ lev->scheduling_level ]->
      guest_create(lev->scheduling_level, p, (TASK_MODEL *)&job);
    level_table[ lev->scheduling_level ]->
      guest_activate(lev->scheduling_level, p);

    return -1;
  }

  return 0;
}
 
/* Dispatch hook: simply forwarded to the master level hosting the
   task as a guest. */
static void CBS_task_dispatch(LEVEL l, PID p, int nostop)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);
  level_des *master = level_table[ lev->scheduling_level ];

  master->guest_dispatch(lev->scheduling_level, p, nostop);
}
 
/* Epilogue hook: if the budget is exhausted the current guest is
   killed, the deadline postponed (rule 4) and the guest re-created
   with the new deadline; otherwise the preemption is forwarded to the
   master level. */
static void CBS_task_epilogue(LEVEL l, PID p)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);
  JOB_TASK_MODEL job;

  /* check if the wcet is finished... */
  if ( proc_table[p].avail_time <= 0) {
    /* we kill the current activation */
    level_table[ lev->scheduling_level ]->
      guest_end(lev->scheduling_level, p);

    /* we modify the deadline according to rule 4 ... */
    CBS_avail_time_check(lev, p);

    /* and, finally, we reinsert the task in the master level */
    job_task_default_model(job, lev->cbs_dline[p]);
    job_task_def_noexc(job);
    level_table[ lev->scheduling_level ]->
      guest_create(lev->scheduling_level, p, (TASK_MODEL *)&job);
    level_table[ lev->scheduling_level ]->
      guest_activate(lev->scheduling_level, p);
    // kern_printf("epil : dl %d per %d p %d |\n",
    //             lev->cbs_dline[p].tv_nsec/1000,lev->period[p],p);

  }
  else
    /* the task has been preempted. it returns into the ready queue by
       calling the guest_epilogue... */
    level_table[ lev->scheduling_level ]->
      guest_epilogue(lev->scheduling_level,p);
}
 
/* Activate hook: a non-sleeping task only gets the arrival recorded
   (when CBS_SAVE_ARRIVALS is set); a sleeping task is admitted through
   CBS_activation and, if periodic, gets its reactivation timer armed
   one period from now. */
static void CBS_task_activate(LEVEL l, PID p)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);

  /* save activation (only if needed... */
  if (proc_table[p].status != SLEEP) {
    if (lev->flag[p] & CBS_SAVE_ARRIVALS)
      lev->nact[p]++;
    return;
  }

  ll_gettime(TIME_EXACT, &proc_table[p].request_time);

  CBS_activation(lev, p, &proc_table[p].request_time);

  /* Set the reactivation timer */
  if (!(lev->flag[p] & CBS_APERIODIC))
  {
    /* we cannot use the deadline computed by CBS_activation because
       the deadline may be != from actual_time + period
       (if we call the task_activate after a task_sleep, and the
       deadline was postponed a lot...) */
    TIMESPEC_ASSIGN(&lev->reactivation_time[p], &proc_table[p].request_time);
    ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
    // TIMESPEC_ASSIGN(&lev->reactivation_time[p], &lev->cbs_dline[p]);
    lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
                                                 CBS_timer_reactivate,
                                                 (void *)p);
#ifdef CBS_COUNTER
    if (p==5) cbs_counter2++;
#endif
  }
  // kern_printf("act : %d %d |",lev->cbs_dline[p].tv_nsec/1000,p);
}
 
/* Insert hook: re-admit the task using the current time as its
   activation time. */
static void CBS_task_insert(LEVEL l, PID p)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);
  struct timespec now;

  ll_gettime(TIME_EXACT, &now);
  CBS_activation(lev, p, &now);
}
 
/* Extract hook: the task blocks on a synchronization primitive.
   First postpone the deadline if the budget is already exhausted
   (rule 4), then remove the guest from the master level. */
static void CBS_task_extract(LEVEL l, PID p)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);

  /* check if the wcet is finished... */
  CBS_avail_time_check(lev, p);

  level_table[ lev->scheduling_level ]->
    guest_end(lev->scheduling_level,p);
}
 
/* Endcycle hook: the current instance is finished.  A saved pending
   activation keeps the task running in the master level; otherwise the
   guest is removed and the task becomes SLEEP (aperiodic) or CBS_IDLE
   (periodic, waiting for the reactivation timer). */
static void CBS_task_endcycle(LEVEL l, PID p)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);

  /* check if the wcet is finished... */
  CBS_avail_time_check(lev, p);

  if (lev->nact[p]) {
    /* continue!!!! */
    ll_gettime(TIME_EXACT, &proc_table[p].request_time);
    lev->nact[p]--;
    level_table[ lev->scheduling_level ]->
      guest_epilogue(lev->scheduling_level,p);
  }
  else {
    level_table[ lev->scheduling_level ]->
      guest_end(lev->scheduling_level,p);

    if (lev->flag[p] & CBS_APERIODIC)
      proc_table[p].status = SLEEP;
    else /* the task is soft_periodic */
      proc_table[p].status = CBS_IDLE;

  }
}
 
/* End hook: the task is killed/terminated.  The guest is removed, the
   periodic reactivation timer cancelled, and the task kept as a
   CBS_ZOMBIE until its deadline, when CBS_timer_zombie releases the
   descriptor and the bandwidth (reactivation_timer is reused to hold
   the zombie event id). */
static void CBS_task_end(LEVEL l, PID p)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);

  /* check if the wcet is finished... */
  CBS_avail_time_check(lev, p);

  level_table[ lev->scheduling_level ]->
    guest_end(lev->scheduling_level,p);

  /* we delete the reactivation timer */
  if (!(lev->flag[p] & CBS_APERIODIC)) {
    event_delete(lev->reactivation_timer[p]);
    lev->reactivation_timer[p] = -1;
  }

  /* Finally, we post the zombie event. when the end period is reached,
     the task descriptor and banwidth are freed */
  proc_table[p].status = CBS_ZOMBIE;
  lev->reactivation_timer[p] = kern_event_post(&lev->cbs_dline[p],
                                               CBS_timer_zombie,
                                               (void *)p);
}
 
/* Sleep hook: voluntary suspension.  Postpone the deadline if needed,
   remove the guest, cancel the periodic reactivation timer and forget
   any saved activation. */
static void CBS_task_sleep(LEVEL l, PID p)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);

  /* check if the wcet is finished... */
  CBS_avail_time_check(lev, p);

  /* a task activation is finished, but we are using a JOB_TASK_MODEL
     that implements a single activation, so we have to call
     the guest_end, that represents a single activation... */
  level_table[ lev->scheduling_level ]->
    guest_end(lev->scheduling_level,p);

  /* we delete the reactivation timer */
  if (!(lev->flag[p] & CBS_APERIODIC)) {
    event_delete(lev->reactivation_timer[p]);
    lev->reactivation_timer[p] = -1;
  }

  proc_table[p].status = SLEEP;

  /* the sleep forgets pending activations... */
  lev->nact[p] = 0;
}
 
/* Guest calls are not supported by the CBS level: CBS tasks are hosted
   as guests of the master level, never the other way around.  Any
   invocation of these entry points is a kernel bug and raises
   XINVALID_GUEST on the running task. */

static int CBS_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }

static void CBS_guest_detach(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void CBS_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void CBS_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void CBS_guest_activate(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void CBS_guest_insert(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void CBS_guest_extract(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void CBS_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void CBS_guest_end(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void CBS_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
/* Registration functions */
 
/*+ Registration function: creates a CBS level hosted on the master
    level.
    int flags the init flags ... see CBS.h +*/
void CBS_register_level(int flags, LEVEL master)
{
  LEVEL l; /* the level that we register */
  CBS_level_des *lev; /* for readableness only */
  PID i; /* a counter */

  printk("CBS_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor();

  printk(" alloco descrittore %d %d\n",l,(int)sizeof(CBS_level_des));

  /* alloc the space needed for the CBS_level_des */
  lev = (CBS_level_des *)kern_alloc(sizeof(CBS_level_des));

  printk(" lev=%d\n",(int)lev);

  /* update the level_table with the new entry */
  level_table[l] = (level_des *)lev;

  /* fill the standard descriptor */
  strncpy(lev->l.level_name, CBS_LEVELNAME, MAX_LEVELNAME);
  lev->l.level_code    = CBS_LEVEL_CODE;
  lev->l.level_version = CBS_LEVEL_VERSION;

  lev->l.level_accept_task_model  = CBS_level_accept_task_model;
  lev->l.level_accept_guest_model = CBS_level_accept_guest_model;
  lev->l.level_status             = CBS_level_status;
  lev->l.level_scheduler          = CBS_level_scheduler;

  /* the on-line guarantee is optional */
  if (flags & CBS_ENABLE_GUARANTEE)
    lev->l.level_guarantee = CBS_level_guarantee;
  else
    lev->l.level_guarantee = NULL;

  lev->l.task_create   = CBS_task_create;
  lev->l.task_detach   = CBS_task_detach;
  lev->l.task_eligible = CBS_task_eligible;
  lev->l.task_dispatch = CBS_task_dispatch;
  lev->l.task_epilogue = CBS_task_epilogue;
  lev->l.task_activate = CBS_task_activate;
  lev->l.task_insert   = CBS_task_insert;
  lev->l.task_extract  = CBS_task_extract;
  lev->l.task_endcycle = CBS_task_endcycle;
  lev->l.task_end      = CBS_task_end;
  lev->l.task_sleep    = CBS_task_sleep;

  lev->l.guest_create   = CBS_guest_create;
  lev->l.guest_detach   = CBS_guest_detach;
  lev->l.guest_dispatch = CBS_guest_dispatch;
  lev->l.guest_epilogue = CBS_guest_epilogue;
  lev->l.guest_activate = CBS_guest_activate;
  lev->l.guest_insert   = CBS_guest_insert;
  lev->l.guest_extract  = CBS_guest_extract;
  lev->l.guest_endcycle = CBS_guest_endcycle;
  lev->l.guest_end      = CBS_guest_end;
  lev->l.guest_sleep    = CBS_guest_sleep;

  /* fill the CBS descriptor part */
  for (i=0; i<MAX_PROC; i++) {
    NULL_TIMESPEC(&lev->cbs_dline[i]);
    lev->period[i] = 0;
    NULL_TIMESPEC(&lev->reactivation_time[i]);
    lev->reactivation_timer[i] = -1;
    lev->nact[i] = 0;
    lev->flag[i] = 0;
  }


  lev->U = 0;

  lev->scheduling_level = master;

  lev->flags = flags & 0x01;
}
 
/* Return the bandwidth currently used by the CBS level l, or 0 when l
   is not a CBS level (code/version sanity check). */
bandwidth_t CBS_usedbandwidth(LEVEL l)
{
  CBS_level_des *lev = (CBS_level_des *)(level_table[l]);

  if (lev->l.level_code != CBS_LEVEL_CODE ||
      lev->l.level_version != CBS_LEVEL_VERSION)
    return 0;

  return lev->U;
}
 
/* Return the number of saved (pending) activations of task p. */
int CBS_get_nact(LEVEL l, PID p)
{
  return ((CBS_level_des *)level_table[l])->nact[p];
}
 
/shark/tags/rel_0_2/kernel/modules/nopm.c
0,0 → 1,389
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: nopm.c,v 1.2 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
See modules/nopm.h.
This code is a copy of nop.c with minor modifications.
**/
 
/*
* Copyright (C) 2000 Massimiliano Giorgi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/nop.h>
 
#include <ll/ll.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <modules/codes.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/* The NOPM resource level descriptor: just the standard mutex
   interface, no extra per-level state. */
typedef struct {
  mutex_resource_des m; /*+ the mutex interface +*/
} NOPM_mutex_resource_des;
 
 
/* this is the structure normally pointed by the opt field in the
   mutex_t structure */
typedef struct {
  PID owner;      /* task currently owning the mutex, NIL if free */
  IQUEUE blocked; /* queue of tasks blocked on this mutex */
  int counter;    /* recursive lock count held by the owner */
} NOPM_mutex_t;
 
 
 
 
 
 
 
 
 
/* Debug registry of every NOPM mutex created, dumped by
   dump_nopm_table(). */
#define MAXTABLE 4096
static mutex_t *table[MAXTABLE];
static int index=0;

/* Record a mutex in the registry; returns -1 when the table is full. */
static int register_nopm(mutex_t *p)
{
  if (index >= MAXTABLE)
    return -1;
  table[index++] = p;
  return 0;
}
 
/* Debug helper: for each registered mutex with a non-empty blocked
   queue, print its owner and the PIDs of the blocked tasks.  Runs with
   interrupts saved/restored around the whole dump. */
void dump_nopm_table(void)
{
  NOPM_mutex_t *ptr;
  SYS_FLAGS f;
  PID j;
  int i;

  f=kern_fsave();
  kern_printf("nopm_mutex module TABLE\n");
  kern_printf("----------------------\n");
  for(i=0;i<index;i++) {
    ptr=table[i]->opt;
    if (!iq_isempty(&ptr->blocked)) {
      kern_printf("%i blocks on 0x%p: ",ptr->owner,table[i]);
      j=iq_query_first(&ptr->blocked);
      while (j!=NIL) {
        kern_printf("%i ",(int)j);
        j=iq_query_next(j, &ptr->blocked);
      }
      kern_printf("\n");
    } else {
      //kern_printf("0x%p no block\n",table[i]);
    }
  }
  kern_frestore(f);

}
 
 
 
 
 
 
 
 
 
 
 
/* Wait status for this library */
#define NOPM_WAIT LIB_STATUS_BASE
 
 
/*+ print resource protocol statistics... (none are collected by this
    module) +*/
static void NOPM_resource_status(RLEVEL r)
{
  kern_printf("No status for NOPM module\n");
}
 
 
/* NOPM attaches no resource model to tasks, so every model is
   refused. */
static int NOPM_level_accept_resource_model(RLEVEL l, RES_MODEL *r)
{
  return -1;
}
 
/* Unreachable: no resource model is ever accepted by this module. */
static void NOPM_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
}
 
/* Nothing per-task to release. */
static void NOPM_res_detach(RLEVEL l, PID p)
{
}
 
/* Accept only attributes addressed to the NOPM class, optionally
   bound to this specific resource level. */
static int NOPM_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a)
{
  return (a->mclass == NOPM_MCLASS || a->mclass == (NOPM_MCLASS | l)) ? 0 : -1;
}
 
/* Initialize a NOPM mutex: allocate its descriptor (owner free, empty
   FIFO queue, zero recursion counter) and register it in the debug
   table.  Returns ENOMEM on allocation failure.  NOTE: no check is
   made against re-initializing a non-destroyed mutex. */
static int NOPM_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  NOPM_mutex_t *p;

  p = (NOPM_mutex_t *) kern_alloc(sizeof(NOPM_mutex_t));

  /* control if there is enough memory; no control on init on a
     non- destroyed mutex */

  if (!p)
    return (ENOMEM);

  p->owner = NIL;
  iq_init(&p->blocked, &freedesc, 0);
  p->counter=0;
  m->mutexlevel = l;
  m->opt = (void *)p;
  /* MG */
  register_nopm(m);
  return 0;
}
 
 
/* Destroy a mutex: refuse (EBUSY) while it is owned, otherwise free
   the descriptor.  Returns 0 on success. */
static int NOPM_destroy(RLEVEL l, mutex_t *m)
{
  NOPM_mutex_t *p = (NOPM_mutex_t *)m->opt;

  /* bugfix: the original read ((NOPM_mutex_t *)m->opt)->owner BEFORE
     the NULL test on m->opt, dereferencing NULL on an already
     destroyed (or never initialized) mutex */
  if (!p)
    return 0;  /* nothing to release */

  if (p->owner != NIL)
    return (EBUSY);

  kern_cli();
  kern_free(m->opt, sizeof(NOPM_mutex_t));
  m->opt = NULL;
  kern_sti();

  return 0;
}
 
/* Lock a mutex.  Recursive locking by the owner just bumps the
   counter; if the mutex is busy the caller blocks FIFO on it and
   ownership is handed over by NOPM_unlock. Always returns 0. */
static int NOPM_lock(RLEVEL l, mutex_t *m)
{
  NOPM_mutex_t *p;

  kern_cli();

  p = (NOPM_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    NOPM_mutexattr_t a;
    NOPM_mutexattr_default(a);
    NOPM_init(l, m, &a);
    /* bugfix: reload the descriptor just created by NOPM_init; the
       original left p == NULL and dereferenced it below */
    p = (NOPM_mutex_t *)m->opt;
  }

  if (p->owner == exec_shadow) {
    /* recursive lock: the task already owns the mutex */
    p->counter++;
    kern_sti();
    return 0;
  }

  if (p->owner != NIL) {   /* We must block exec task */
    LEVEL sl;              /* scheduling level of the blocked task
                              (renamed: the original shadowed parameter l) */
    TIME tx;               /* a dummy TIME for timespec operations */
    struct timespec ty;    /* a dummy timespec for timespec operations */

    proc_table[exec_shadow].context = kern_context_save();

    /* SAME AS SCHEDULER... manage the capacity event and the load_info */
    ll_gettime(TIME_EXACT, &schedule_time);
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    proc_table[exec_shadow].avail_time -= tx;
    jet_update_slice(tx);
    if (cap_timer != NIL) {
      event_delete(cap_timer);
      cap_timer = NIL;
    }

    sl = proc_table[exec_shadow].task_level;
    level_table[sl]->task_extract(sl, exec_shadow);

    /* we insert the task in the semaphore queue */
    proc_table[exec_shadow].status = NOPM_WAIT;
    iq_insertlast(exec_shadow, &p->blocked);

    /* and finally we reschedule */
    exec = exec_shadow = -1;
    scheduler();
    kern_context_load(proc_table[exec_shadow].context);
  }
  else {
    /* the mutex is free, We can lock it! */
    p->owner = exec_shadow;
    p->counter++;
    kern_sti();
  }

  return 0;
}
 
/* Non-blocking lock attempt.  Returns 0 when the mutex was acquired,
   EBUSY when another task owns it. */
static int NOPM_trylock(RLEVEL l, mutex_t *m)
{
  NOPM_mutex_t *p;

  kern_cli();

  p = (NOPM_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    NOPM_mutexattr_t a;
    NOPM_mutexattr_default(a);
    NOPM_init(l, m, &a);
    /* bugfix: reload the descriptor just created by NOPM_init; the
       original left p == NULL and dereferenced it below */
    p = (NOPM_mutex_t *)m->opt;
  }

  if (p->owner != NIL) {
    /* a task already owns the mutex */
    kern_sti();
    return (EBUSY);
  }

  /* the mutex is free, We can lock it! */
  p->owner = exec_shadow;
  p->counter++;
  kern_sti();

  return 0;
}
 
/* Unlock a mutex.  Decrements the recursion counter; on the last
   unlock, ownership is handed FIFO to the first task still waiting
   (skipping stale queue entries) and a reschedule is forced.
   Returns 0, EINVAL on an uninitialized mutex, EPERM when the caller
   is not the owner. */
static int NOPM_unlock(RLEVEL l, mutex_t *m)
{
  NOPM_mutex_t *p;
  PID e;

  /* bugfix: the original never disabled interrupts here although every
     exit path below calls kern_sti(); balance the pairs */
  kern_cli();

  p = (NOPM_mutex_t *)m->opt;
  if (!p) {
    kern_sti();
    return (EINVAL);
  }

  if (p->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    kern_printf("wrongunlock<owner=%i,unlocker=%i>",p->owner,exec_shadow);
    kern_sti();
    return (EPERM);
  }

  p->counter--;
  if (p->counter != 0) {
    /* we have multiple lock on this mutex: the owner keeps it */
    kern_sti();
    return 0;
  }

  proc_table[exec_shadow].context = kern_context_save();

  /* hand the mutex to the first task genuinely still waiting on it */
  for (;;) {
    e = iq_getfirst(&p->blocked);
    if (e == NIL) {
      break;  /* queue empty: the mutex becomes free */
    } else if (proc_table[e].status == NOPM_WAIT) {
      /* use a local LEVEL instead of clobbering parameter l */
      LEVEL sl = proc_table[e].task_level;
      level_table[sl]->task_insert(sl, e);
      p->counter++;  /* the new owner holds one lock */
      break;
    }
  }

  /* MG!!! new owner (NIL when the queue was empty) */
  p->owner = e;

  scheduler();
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}
 
/* Register the NOPM mutex module: allocate a resource descriptor,
   publish it in resource_table[] and wire up every hook. */
void NOPM_register_module(void)
{
  RLEVEL l;                    /* the descriptor slot we obtain */
  NOPM_mutex_resource_des *mr; /* the new module descriptor */

  printk("NOPM_register_module\n");

  /* request an entry in the level_table */
  l = resource_alloc_descriptor();

  /* allocate and install the module descriptor */
  mr = (NOPM_mutex_resource_des *)kern_alloc(sizeof(NOPM_mutex_resource_des));
  resource_table[l] = (resource_des *)mr;

  /* generic resource_des part */
  strncpy(mr->m.r.res_name, NOPM_MODULENAME, MAX_MODULENAME);
  mr->m.r.res_code    = NOPM_MODULE_CODE;
  mr->m.r.res_version = NOPM_MODULE_VERSION;
  mr->m.r.rtype       = MUTEX_RTYPE;

  mr->m.r.resource_status             = NOPM_resource_status;
  mr->m.r.level_accept_resource_model = NOPM_level_accept_resource_model;
  mr->m.r.res_register                = NOPM_res_register;
  mr->m.r.res_detach                  = NOPM_res_detach;

  /* mutex_resource_des part */
  mr->m.level_accept_mutexattr = NOPM_level_accept_mutexattr;
  mr->m.init                   = NOPM_init;
  mr->m.destroy                = NOPM_destroy;
  mr->m.lock                   = NOPM_lock;
  mr->m.trylock                = NOPM_trylock;
  mr->m.unlock                 = NOPM_unlock;
}
 
/shark/tags/rel_0_2/kernel/modules/rm.c
0,0 → 1,686
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: rm.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the scheduling module RM (Rate Monotonic)
 
Read rm.h for further details.
 
This file is equal to EDF.c except for:
 
. EDF changed to RM :-)
. q_timespec_insert changed to q_insert
. proc_table[p].priority is also modified when we modify lev->period[p]
 
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/rm.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <kernel/trace.h>
 
/*+ Status used in the level +*/
#define RM_READY MODULE_STATUS_BASE /*+ - Ready status +*/
#define RM_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/
#define RM_WAIT MODULE_STATUS_BASE+3 /*+ to wait the deadline +*/
#define RM_IDLE MODULE_STATUS_BASE+4 /*+ to wait the deadline +*/
#define RM_ZOMBIE MODULE_STATUS_BASE+5 /*+ to wait the free time +*/
 
/*+ flags +*/
#define RM_FLAG_SPORADIC 1
#define RM_FLAG_NORAISEEXC 2
 
/*+ the level redefinition for the Rate Monotonic +*/
typedef struct {
  level_des l;     /*+ the standard level descriptor +*/

  TIME period[MAX_PROC]; /*+ The task periods; the deadlines are
                             stored in the priority field +*/
  int deadline_timer[MAX_PROC];
                   /*+ The task deadline timers (-1 when unset) +*/

  int flag[MAX_PROC];
                   /*+ used to manage the JOB_TASK_MODEL and the
                       periodicity (RM_FLAG_* bits) +*/

  IQUEUE ready;    /*+ the ready queue, ordered by priority == period +*/

  int flags;       /*+ the init flags... (RM_ENABLE_* bits) +*/

  bandwidth_t U;   /*+ the used bandwidth +*/

} RM_level_des;
 
 
/* Map a task status code to a printable name; statuses below the
   module range are delegated to the generic kernel. */
static char *RM_status_to_a(WORD status)
{
  if (status < MODULE_STATUS_BASE)
    return status_to_a(status);

  if (status == RM_READY)         return "RM_Ready";
  if (status == RM_WCET_VIOLATED) return "RM_Wcet_Violated";
  if (status == RM_WAIT)          return "RM_Sporadic_Wait";
  if (status == RM_IDLE)          return "RM_Idle";
  if (status == RM_ZOMBIE)        return "RM_Zombie";
  return "RM_Unknown";
}
 
/* Deadline-timer handler, fired at a task's absolute deadline.
   The action depends on the state the task is found in:
   ZOMBIE -> descriptor freed, bandwidth released;
   IDLE   -> periodic reactivation;
   WAIT   -> sporadic task becomes SLEEP (reactivatable);
   other  -> deadline miss. */
static void RM_timer_deadline(void *par)
{
  PID p = (PID) par;
  RM_level_des *lev;
  struct timespec *temp;

  lev = (RM_level_des *)level_table[proc_table[p].task_level];

  switch (proc_table[p].status) {
    case RM_ZOMBIE:
      /* we finally put the task in the ready queue */
      proc_table[p].status = FREE;
      iq_insertfirst(p,&freedesc);
      /* and free the allocated bandwidth */
      lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet;
      break;

    case RM_IDLE:
      /* tracer stuff */
      trc_logevent(TRC_INTACTIVATION,&p);
      /* similar to RM_task_activate: new deadline = old deadline + period */
      temp = iq_query_timespec(p, &lev->ready);
      TIMESPEC_ASSIGN(&proc_table[p].request_time, temp);
      ADDUSEC2TIMESPEC(lev->period[p], temp);
      proc_table[p].status = RM_READY;
      iq_priority_insert(p,&lev->ready);
      /* repost this handler at the new deadline */
      lev->deadline_timer[p] = kern_event_post(temp,
                                               RM_timer_deadline,
                                               (void *)p);
      //printk("(d%d idle priority set to %d)",p,proc_table[p].priority );
      event_need_reschedule();
      printk("el%d|",p);
      break;

    case RM_WAIT:
      /* Without this, the task cannot be reactivated!!! */
      proc_table[p].status = SLEEP;
      break;

    default:
      /* else, a deadline miss occurred!!! */
      kern_printf("timer_deadline:AAARRRGGGHHH!!!");
      kern_raise(XDEADLINE_MISS,p);
  }
}
 
/* Deadline watchdog for guest tasks: firing always means a miss. */
static void RM_timer_guest_deadline(void *par)
{
  PID p = (PID) par;

  kern_printf("AAARRRGGGHHH!!!");
  kern_raise(XDEADLINE_MISS, p);
}
 
/* Accept only hard task models carrying both a wcet and a mit. */
static int RM_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
  HARD_TASK_MODEL *h;

  if (m->pclass != HARD_PCLASS && m->pclass != (HARD_PCLASS | l))
    return -1;

  h = (HARD_TASK_MODEL *)m;
  return (h->wcet && h->mit) ? 0 : -1;
}
 
/* Guests must arrive as JOB task models. */
static int RM_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
  return (m->pclass == JOB_PCLASS || m->pclass == (JOB_PCLASS | l)) ? 0 : -1;
}
 
 
/* Render a boolean flag as a fixed-width "On "/"Off" label. */
static char *onoff(int i)
{
  return i ? "On " : "Off";
}
 
/* Debug printout of the level: options, used bandwidth, then every
   task — first the ready queue, then all other non-free tasks that
   belong to this level. */
static void RM_level_status(LEVEL l)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  PID p = iq_query_first(&lev->ready);

  kern_printf("Wcet Check : %s\n",
              onoff(lev->flags & RM_ENABLE_WCET_CHECK));
  kern_printf("On-line guarantee : %s\n",
              onoff(lev->flags & RM_ENABLE_GUARANTEE));
  kern_printf("Used Bandwidth : %u/%u\n",
              lev->U, MAX_BANDWIDTH);

  /* tasks currently queued as ready (deadline stored in the queue) */
  while (p != NIL) {
    if ((proc_table[p].pclass) == JOB_PCLASS)
      kern_printf("Pid: %2d (GUEST)\n", p);
    else
      kern_printf("Pid: %2d Name: %10s %s: %9ld Dline: %9ld.%6ld Stat: %s\n",
                  p,
                  proc_table[p].name,
                  lev->flag[p] & RM_FLAG_SPORADIC ? "MinITime" : "Period ",
                  lev->period[p],
                  iq_query_timespec(p, &lev->ready)->tv_sec,
                  iq_query_timespec(p, &lev->ready)->tv_nsec/1000,
                  RM_status_to_a(proc_table[p].status));
    p = iq_query_next(p, &lev->ready);
  }

  /* every other task of this level that is neither ready nor free */
  for (p=0; p<MAX_PROC; p++)
    if (proc_table[p].task_level == l && proc_table[p].status != RM_READY
        && proc_table[p].status != FREE )
      kern_printf("Pid: %2d Name: %10s %s: %9ld Dline: %9ld.%6ld Stat: %s\n",
                  p,
                  proc_table[p].name,
                  lev->flag[p] & RM_FLAG_SPORADIC ? "MinITime" : "Period ",
                  lev->period[p],
                  iq_query_timespec(p, &lev->ready)->tv_sec,
                  iq_query_timespec(p, &lev->ready)->tv_nsec/1000,
                  RM_status_to_a(proc_table[p].status));
}
 
/* The scheduler only gets the first task in the queue: the ready
   queue is kept ordered by priority (== period), so the head is
   always the Rate-Monotonic choice. */
static PID RM_level_scheduler(LEVEL l)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  return iq_query_first(&lev->ready);
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set...
   Returns 1 and subtracts lev->U when the level fits in the free
   bandwidth, 0 otherwise (including a previously failed guarantee). */
static int RM_level_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  /* a previous overflow of lev->U marks the level unschedulable */
  if (lev->flags & RM_FAILED_GUARANTEE) {
    *freebandwidth = 0;
    return 0;
  }

  if (*freebandwidth < lev->U)
    return 0;

  *freebandwidth -= lev->U;
  return 1;
}
 
/* Create a hard task: record its period (which is also its RM
   priority), set up the wcet capacity if enabled, and account its
   bandwidth for the on-line guarantee. */
static int RM_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  /* if the RM_task_create is called, then the pclass must be a
     valid pclass. */

  HARD_TASK_MODEL *h = (HARD_TASK_MODEL *)m;

  /* under RM the priority is the period (mit): the queue priority
     field and period[] are kept equal */
  *iq_query_priority(p, &lev->ready) = lev->period[p] = h->mit;

  /* APERIODIC hard tasks are treated as sporadic */
  if (h->periodicity == APERIODIC)
    lev->flag[p] = RM_FLAG_SPORADIC;
  else
    lev->flag[p] = 0;
  lev->deadline_timer[p] = -1;

  /* Enable wcet check */
  if (lev->flags & RM_ENABLE_WCET_CHECK) {
    proc_table[p].avail_time = h->wcet;
    proc_table[p].wcet = h->wcet;
    proc_table[p].control |= CONTROL_CAP;
  }

  /* update the bandwidth... */
  if (lev->flags & RM_ENABLE_GUARANTEE) {
    bandwidth_t b;
    b = (MAX_BANDWIDTH / h->mit) * h->wcet;

    /* really update lev->U, checking an overflow... */
    if (MAX_BANDWIDTH - lev->U > b)
      lev->U += b;
    else
      /* The task can NOT be guaranteed (U>MAX_BANDWIDTH)...
         in this case, we don't raise an exception... in fact, after the
         RM_task_create the task_create will call level_guarantee that return
         -1... return -1 in RM_task_create isn't correct, because:
         . generally, the guarantee must be done when also the resources
           are registered
         . returning -1 will cause the task_create to return with an errno
           ETASK_CREATE instead of ENO_GUARANTEE!!!

         Why I use the flag??? because if the lev->U overflows, if i.e. I set
         it to MAX_BANDWIDTH, I lose the correct allocated bandwidth...
      */
      lev->flags |= RM_FAILED_GUARANTEE;
  }

  return 0; /* OK, also if the task cannot be guaranteed... */
}
 
/* Detach a task: no dynamic per-task memory exists, so either clear
   the failed-guarantee flag or return the bandwidth it reserved. */
static void RM_task_detach(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  if (lev->flags & RM_FAILED_GUARANTEE)
    lev->flags &= ~RM_FAILED_GUARANTEE;
  else
    lev->U -= (MAX_BANDWIDTH / lev->period[p]) * proc_table[p].wcet;
}
 
/* A task chosen by RM_level_scheduler is always eligible to run. */
static int RM_task_eligible(LEVEL l, PID p)
{
  return 0;
}
 
/* Dispatch: scheduler() has already set the task to EXE; remove it
   from the ready queue (p is not necessarily the queue head!). */
static void RM_task_dispatch(LEVEL l, PID p, int nostop)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  iq_extract(p, &lev->ready);
}
 
/* Epilogue: on wcet exhaustion raise XWCET_VIOLATION, otherwise the
   preempted task goes back into the deadline-ordered ready queue. */
static void RM_task_epilogue(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  if ((lev->flags & RM_ENABLE_WCET_CHECK) && proc_table[p].avail_time <= 0) {
    /* wcet exhausted: park the task in the violated state */
    kern_raise(XWCET_VIOLATION, p);
    proc_table[p].status = RM_WCET_VIOLATED;
    return;
  }

  /* plain preemption: re-enter the ready queue */
  iq_priority_insert(p, &lev->ready);
  proc_table[p].status = RM_READY;
}
 
/* Activate a task: compute its absolute deadline (now + period),
   queue it ready and post the deadline timer. */
static void RM_task_activate(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  struct timespec *temp;

  /* a sporadic task still waiting its deadline cannot be activated
     again: that would violate the minimum interarrival time */
  if (proc_table[p].status == RM_WAIT) {
    kern_raise(XACTIVATION,p);
    return;
  }

  /* Test if we are trying to activate a non sleeping task    */
  /* Ignore this; the task is already active                  */
  if (proc_table[p].status != SLEEP &&
      proc_table[p].status != RM_WCET_VIOLATED)
    return;


  /* see also RM_timer_deadline */
  ll_gettime(TIME_EXACT, &proc_table[p].request_time);

  /* absolute deadline = activation time + period */
  temp = iq_query_timespec(p, &lev->ready);
  TIMESPEC_ASSIGN(temp, &proc_table[p].request_time);
  ADDUSEC2TIMESPEC(lev->period[p], temp);

  /* Insert task in the correct position */
  proc_table[p].status = RM_READY;
  iq_priority_insert(p,&lev->ready);

  /* Set the deadline timer */
  lev->deadline_timer[p] = kern_event_post(temp,
                                           RM_timer_deadline,
                                           (void *)p);
}
 
/* Re-insert a task (e.g. returning from a block): like
   RM_task_activate but without the state check and without touching
   request_time — the deadline stays whatever it was. */
static void RM_task_insert(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  proc_table[p].status = RM_READY;
  iq_priority_insert(p, &lev->ready);
}
 
static void RM_task_extract(LEVEL l, PID p)
{
  /* Extract the running task from the level
     . we have already extract it from the ready queue at the dispatch time.
     . the capacity event have to be removed by the generic kernel
     . the wcet don't need modification...
     . the state of the task is set by the calling function
     . the deadline must remain...

     So, we do nothing!!!
  */
}
 
/* End of a job (completed within its wcet): sporadic tasks wait
   until their deadline, periodic ones idle until the next period.
   The pending deadline timer does the reactivation bookkeeping. */
static void RM_task_endcycle(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  proc_table[p].status =
      (lev->flag[p] & RM_FLAG_SPORADIC) ? RM_WAIT : RM_IDLE;

  /* refill the capacity for the next job */
  if (lev->flags & RM_ENABLE_WCET_CHECK)
    proc_table[p].avail_time = proc_table[p].wcet;
}
 
/* Task termination: keep the descriptor as a zombie; the deadline
   timer will move it to the free queue and release the bandwidth. */
static void RM_task_end(LEVEL l, PID p)
{
  proc_table[p].status = RM_ZOMBIE;
}
 
/* Put a task to sleep until its deadline: the deadline timer will
   turn RM_WAIT into SLEEP when it fires. */
static void RM_task_sleep(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  proc_table[p].status = RM_WAIT;

  /* refill the capacity for the next job */
  if (lev->flags & RM_ENABLE_WCET_CHECK)
    proc_table[p].avail_time = proc_table[p].wcet;
}
 
 
/* Guest Functions
These functions manages a JOB_TASK_MODEL, that is used to put
a guest task in the RM ready queue. */
 
/* Create a guest task from a JOB model: the master level supplies
   the absolute deadline and the period (== RM priority). */
static int RM_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  JOB_TASK_MODEL *job = (JOB_TASK_MODEL *)m;

  /* if the RM_guest_create is called, then the pclass must be a
     valid pclass. */


  /* absolute deadline comes from the master level */
  *iq_query_timespec(p,&lev->ready) = job->deadline;
  lev->deadline_timer[p] = -1;

  /* noraiseexc: the master level handles deadline misses itself,
     so no watchdog will be posted (see RM_guest_activate) */
  if (job->noraiseexc)
    lev->flag[p] = RM_FLAG_NORAISEEXC;
  else
    lev->flag[p] = 0;

  /* the guest's RM priority is its period */
  *iq_query_priority(p, &lev->ready) = lev->period[p] = job->period;

  /* there is no bandwidth guarantee at this level, it is performed
     by the level that inserts guest tasks... */

  return 0; /* OK, also if the task cannot be guaranteed... */
}
 
static void RM_guest_detach(LEVEL l, PID p)
{
  /* the RM level doesn't introduce any dynamically allocated new field.
     No guarantee is performed on guest tasks... so we don't have to reset
     the NO_GUARANTEE FIELD */
}
 
/* Guest dispatch: scheduler() already set EXE; drop the guest from
   the ready queue (not necessarily the head!). */
static void RM_guest_dispatch(LEVEL l, PID p, int nostop)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  iq_extract(p, &lev->ready);
}
 
/* Preempted guest: back into the deadline-ordered ready queue. */
static void RM_guest_epilogue(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  proc_table[p].status = RM_READY;
  iq_priority_insert(p, &lev->ready);
}
 
/* Activate a guest: enqueue by deadline and post the deadline-miss
   watchdog unless the master level asked not to (NORAISEEXC). */
static void RM_guest_activate(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  iq_priority_insert(p, &lev->ready);
  proc_table[p].status = RM_READY;

  if (!(lev->flag[p] & RM_FLAG_NORAISEEXC))
    lev->deadline_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
                                             RM_timer_guest_deadline,
                                             (void *)p);
}
 
/* Re-insert a guest in the ready queue (deadline unchanged). */
static void RM_guest_insert(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  proc_table[p].status = RM_READY;
  iq_priority_insert(p, &lev->ready);
}
 
static void RM_guest_extract(LEVEL l, PID p)
{
  /* Extract the running task from the level
     . we have already extract it from the ready queue at the dispatch time.
     . the state of the task is set by the calling function
     . the deadline must remain...

     So, we do nothing!!!
  */
}
 
/* Guests have no endcycle: reaching this point is a kernel error. */
static void RM_guest_endcycle(LEVEL l, PID p)
{
  kern_raise(XINVALID_GUEST, exec_shadow);
}
 
/* Guest termination: dequeue it if still ready and cancel any
   pending deadline watchdog. */
static void RM_guest_end(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  if (proc_table[p].status == RM_READY)
    iq_extract(p, &lev->ready);

  /* the slice is over: remove the deadline timer, if any */
  if (lev->deadline_timer[p] != NIL) {
    event_delete(lev->deadline_timer[p]);
    lev->deadline_timer[p] = NIL;
  }
}
 
/* Guests cannot sleep: reaching this point is a kernel error. */
static void RM_guest_sleep(LEVEL l, PID p)
{
  kern_raise(XINVALID_GUEST, exec_shadow);
}
 
 
 
 
/* Registration functions */
 
/*+ Registration function:
    int flags the init flags ... see rm.h +*/
void RM_register_level(int flags)
{
  LEVEL l;            /* the level that we register */
  RM_level_des *lev;  /* for readableness only */
  PID i;              /* a counter */

  printk("RM_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor();

  /* alloc the space needed for the RM_level_des */
  lev = (RM_level_des *)kern_alloc(sizeof(RM_level_des));

  printk(" lev=%d\n",(int)lev);

  /* update the level_table with the new entry */
  level_table[l] = (level_des *)lev;

  /* fill the standard descriptor */
  strncpy(lev->l.level_name, RM_LEVELNAME, MAX_LEVELNAME);
  lev->l.level_code = RM_LEVEL_CODE;
  lev->l.level_version = RM_LEVEL_VERSION;

  lev->l.level_accept_task_model = RM_level_accept_task_model;
  lev->l.level_accept_guest_model = RM_level_accept_guest_model;
  lev->l.level_status = RM_level_status;
  lev->l.level_scheduler = RM_level_scheduler;

  /* the guarantee hook is optional: installed only when requested */
  if (flags & RM_ENABLE_GUARANTEE)
    lev->l.level_guarantee = RM_level_guarantee;
  else
    lev->l.level_guarantee = NULL;

  lev->l.task_create = RM_task_create;
  lev->l.task_detach = RM_task_detach;
  lev->l.task_eligible = RM_task_eligible;
  lev->l.task_dispatch = RM_task_dispatch;
  lev->l.task_epilogue = RM_task_epilogue;
  lev->l.task_activate = RM_task_activate;
  lev->l.task_insert = RM_task_insert;
  lev->l.task_extract = RM_task_extract;
  lev->l.task_endcycle = RM_task_endcycle;
  lev->l.task_end = RM_task_end;
  lev->l.task_sleep = RM_task_sleep;

  lev->l.guest_create = RM_guest_create;
  lev->l.guest_detach = RM_guest_detach;
  lev->l.guest_dispatch = RM_guest_dispatch;
  lev->l.guest_epilogue = RM_guest_epilogue;
  lev->l.guest_activate = RM_guest_activate;
  lev->l.guest_insert = RM_guest_insert;
  lev->l.guest_extract = RM_guest_extract;
  lev->l.guest_endcycle = RM_guest_endcycle;
  lev->l.guest_end = RM_guest_end;
  lev->l.guest_sleep = RM_guest_sleep;

  /* fill the RM descriptor part */
  for(i=0; i<MAX_PROC; i++) {
    lev->period[i] = 0;
    lev->deadline_timer[i] = -1;
    lev->flag[i] = 0;
  }

  iq_init(&lev->ready, &freedesc, 0);
  /* keep only the low option bits (wcet check / guarantee flags) */
  lev->flags = flags & 0x07;
  lev->U = 0;
}
 
/* Return the bandwidth currently reserved by this level, or 0 when
   the level descriptor does not belong to the RM module. */
bandwidth_t RM_usedbandwidth(LEVEL l)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  if (lev->l.level_code != RM_LEVEL_CODE ||
      lev->l.level_version != RM_LEVEL_VERSION)
    return 0;

  return lev->U;
}
 
/shark/tags/rel_0_2/kernel/modules/rrsoft.c
0,0 → 1,582
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: rrsoft.c,v 1.3 2002-11-11 08:32:07 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:07 $
------------
 
This file contains the scheduling module RRSOFT (Round Robin)
 
Read rrsoft.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/rrsoft.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/*+ Status used in the level +*/
#define RRSOFT_READY MODULE_STATUS_BASE
#define RRSOFT_IDLE MODULE_STATUS_BASE+2
 
/*+ the level redefinition for the Round Robin level +*/
typedef struct {
  level_des l;          /*+ the standard level descriptor +*/

  int nact[MAX_PROC];   /*+ number of pending activations;
                            -1 = activations are not saved +*/

  IQUEUE ready;         /*+ the ready queue +*/

  int slice;            /*+ the level's time slice +*/

  TIME period[MAX_PROC]; /*+ activation period +*/

  struct timespec reactivation_time[MAX_PROC];
                        /*+ the time at witch the reactivation timer is post +*/
  int reactivation_timer[MAX_PROC];
                        /*+ the recativation timer +*/

  BYTE periodic[MAX_PROC]; /*+ 1 if the task is periodically reactivated +*/


  struct multiboot_info *multiboot; /*+ used if the level have to insert
                                        the main task +*/

  BYTE models;          /*+ Task Model that the Module can Handle +*/
} RRSOFT_level_des;
 
 
/* Map a task status code to a printable name; statuses below the
   module range are delegated to the generic kernel. */
static char *RRSOFT_status_to_a(WORD status)
{
  if (status < MODULE_STATUS_BASE)
    return status_to_a(status);

  if (status == RRSOFT_READY)
    return "RRSOFT_Ready";
  if (status == RRSOFT_IDLE)
    return "RRSOFT_Idle";
  return "RRSOFT_Unknown";
}
 
 
/* this is the periodic reactivation of the task... it is posted only
   if the task is a periodic task */
static void RRSOFT_timer_reactivate(void *par)
{
  PID p = (PID) par;
  RRSOFT_level_des *lev;
  // kern_printf("react");

  lev = (RRSOFT_level_des *)level_table[proc_table[p].task_level];

  if (proc_table[p].status == RRSOFT_IDLE) {
    /* the task has finished the current activation and must be
       reactivated */
    proc_table[p].status = RRSOFT_READY;
    iq_insertlast(p,&lev->ready);

    event_need_reschedule();
  }
  else if (lev->nact[p] >= 0)
    /* the task has not completed the current activation, so we save
       the activation incrementing nact... (nact == -1 means
       activations are not saved at all) */
    lev->nact[p]++;

  /* repost the event at the next period end... */
  ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
  lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
                                               RRSOFT_timer_reactivate,
                                               (void *)p);
  /* tracer stuff */
  // trc_logevent(TRC_INTACTIVATION,&p);
}
 
 
/* Accept a model only when the module was configured (lev->models)
   to handle its pclass. */
static int RRSOFT_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);

  if ((lev->models & RRSOFT_ONLY_NRT) &&
      (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l)))
    return 0;
  if ((lev->models & RRSOFT_ONLY_SOFT) &&
      (m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l)))
    return 0;
  if ((lev->models & RRSOFT_ONLY_HARD) &&
      (m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l)))
    return 0;

  return -1;
}
 
/* Round robin has no notion of guest tasks. */
static int RRSOFT_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
  return -1;
}
 
/* Debug printout: the time slice, the ready queue, then every other
   non-free task belonging to this level. */
static void RRSOFT_level_status(LEVEL l)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);
  PID p;

  kern_printf("Slice: %d \n", lev->slice);

  /* tasks currently queued as ready */
  for (p = iq_query_first(&lev->ready); p != NIL;
       p = iq_query_next(p, &lev->ready))
    kern_printf("Pid: %d\t Name: %20s Status: %s\n", p, proc_table[p].name,
                RRSOFT_status_to_a(proc_table[p].status));

  /* every other task of this level that is neither ready nor free */
  for (p = 0; p < MAX_PROC; p++)
    if (proc_table[p].task_level == l && proc_table[p].status != RRSOFT_READY
        && proc_table[p].status != FREE)
      kern_printf("Pid: %d\t Name: %20s Status: %s\n", p, proc_table[p].name,
                  RRSOFT_status_to_a(proc_table[p].status));
}
 
 
/* This is not efficient but very fair :-)
   The need of all this stuff is because if a task execute a long time
   due to (shadow!) priority inheritance, then the task shall go to the
   tail of the queue many times...
   Rotate tasks whose slice is exhausted (refilling their capacity)
   until the queue head has residual time, or the queue is empty. */
static PID RRSOFT_level_scheduler(LEVEL l)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);
  PID p;

  while ((p = iq_query_first(&lev->ready)) != -1 &&
         proc_table[p].avail_time <= 0) {
    /* exhausted slice: refill and move to the back of the queue */
    proc_table[p].avail_time += proc_table[p].wcet;
    iq_extract(p, &lev->ready);
    iq_insertlast(p, &lev->ready);
  }

  return p;
}
 
/* the RRSOFT level always guarantee... the function is defined because
   there can be an aperiodic server at a level with less priority than
   the RRSOFT that need guarantee (e.g., a TBS server) */
static int RRSOFT_level_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  return 1;
}
 
 
/* Create a task: the generic task_create already set it to SLEEP;
   here we fill the capacity fields (wcet is reused as the timeslice,
   so overruns can be accounted) and the periodicity bookkeeping. */
static int RRSOFT_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);

  /* bugfix: task descriptors are recycled, so reset the periodicity
     fields unconditionally.  The original reset them only in the NRT
     branch, so a SOFT/HARD aperiodic task reusing a slot previously
     owned by a periodic task inherited stale periodic[p]/period[p]
     values and got a spurious reactivation timer. */
  lev->periodic[p] = 0;
  lev->period[p] = 0;

  if (lev->models & RRSOFT_ONLY_NRT &&
      (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l))) {
    NRT_TASK_MODEL *nrt = (NRT_TASK_MODEL *)m;

    /* a task-specific slice overrides the level default */
    if (nrt->slice) {
      proc_table[p].avail_time = nrt->slice;
      proc_table[p].wcet = nrt->slice;
    }
    else {
      proc_table[p].avail_time = lev->slice;
      proc_table[p].wcet = lev->slice;
    }
    proc_table[p].control |= CONTROL_CAP;

    if (nrt->arrivals == SAVE_ARRIVALS)
      lev->nact[p] = 0;
    else
      lev->nact[p] = -1;  /* -1: drop activations arriving while active */
  }
  else if (lev->models & RRSOFT_ONLY_SOFT &&
           (m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l))) {
    SOFT_TASK_MODEL *soft = (SOFT_TASK_MODEL *)m;

    proc_table[p].avail_time = lev->slice;
    proc_table[p].wcet = lev->slice;
    proc_table[p].control |= CONTROL_CAP;

    if (soft->arrivals == SAVE_ARRIVALS)
      lev->nact[p] = 0;
    else
      lev->nact[p] = -1;

    if (soft->periodicity == PERIODIC) {
      lev->periodic[p] = 1;
      lev->period[p] = soft->period;
    }
  }
  else if (lev->models & RRSOFT_ONLY_HARD &&
           (m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l))) {
    HARD_TASK_MODEL *hard = (HARD_TASK_MODEL *)m;

    proc_table[p].avail_time = lev->slice;
    proc_table[p].wcet = lev->slice;
    proc_table[p].control |= CONTROL_CAP;
    lev->nact[p] = 0;

    if (hard->periodicity == PERIODIC) {
      lev->periodic[p] = 1;
      lev->period[p] = hard->mit;
    }
  }

  return 0; /* OK */
}
 
static void RRSOFT_task_detach(LEVEL l, PID p)
{
  /* the RRSOFT level doesn't introduce any new field in the TASK_MODEL
     so, all detach stuffs are done by the task_create
     The task state is set at FREE by the general task_create */
}
 
/* A task chosen by the round-robin scheduler can always run. */
static int RRSOFT_task_eligible(LEVEL l, PID p)
{
  return 0;
}
 
/* Dispatch: scheduler() already set the task to EXE; drop it from the
   ready queue (it is not necessarily the queue head!). */
static void RRSOFT_task_dispatch(LEVEL l, PID p, int nostop)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);

  iq_extract(p, &lev->ready);
}
 
/* Epilogue: if the slice is finished, refill it and go to the back
   of the queue; otherwise keep the head position with the residue. */
static void RRSOFT_task_epilogue(LEVEL l, PID p)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);

  if (proc_table[p].avail_time <= 0) {
    proc_table[p].avail_time += proc_table[p].wcet;
    iq_insertlast(p, &lev->ready);
  }
  else {
    iq_insertfirst(p, &lev->ready);
  }

  proc_table[p].status = RRSOFT_READY;
}
 
/* Activate a task: enqueue it ready and, for periodic tasks, post
   the first reactivation timer one period from now. */
static void RRSOFT_task_activate(LEVEL l, PID p)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);

  /* Test if we are trying to activate a non sleeping task    */
  /* save activation (only if needed... */
  if (proc_table[p].status != SLEEP && proc_table[p].status != RRSOFT_IDLE) {
    if (lev->nact[p] != -1)
      lev->nact[p]++;
    return;
  }

  ll_gettime(TIME_EXACT, &proc_table[p].request_time);

  /* Insert task in the correct position */
  proc_table[p].status = RRSOFT_READY;
  iq_insertlast(p,&lev->ready);

  /* Set the reactivation timer */
  if (lev->periodic[p])
  {
    TIMESPEC_ASSIGN(&lev->reactivation_time[p], &proc_table[p].request_time);
    ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
    // TIMESPEC_ASSIGN(&lev->reactivation_time[p], &lev->cbs_dline[p]);
    lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
                                                 RRSOFT_timer_reactivate,
                                                 (void *)p);
  }
}
 
static void RRSOFT_task_insert(LEVEL l, PID p)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);

  /* Similar to RRSOFT_task_activate, but we don't check what state
     the task is in and we don't set request_time: the task simply
     re-enters the level at the tail of the ready queue. */
  proc_table[p].status = RRSOFT_READY;
  iq_insertlast(p,&lev->ready);
}
 
static void RRSOFT_task_extract(LEVEL l, PID p)
{
  /* Extract the running task from the level:
     . it was already removed from the ready queue at dispatch time
     . the capacity event is removed by the generic kernel
     . the wcet (slice) needs no modification
     . the task state is set by the calling function

     So, we do nothing!!!
  */
}
 
static void RRSOFT_task_endcycle(LEVEL l, PID p)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);

  if (lev->nact[p] > 0) {
    /* Saved activations pending: consume one and restart the task
       immediately at the head of the ready queue. */
    ll_gettime(TIME_EXACT, &proc_table[p].request_time);
    lev->nact[p]--;
    iq_insertfirst(p,&lev->ready);
    proc_table[p].status = RRSOFT_READY;
  }
  else
    /* No pending activations: park the task until the reactivation
       timer or an explicit activate wakes it. */
    proc_table[p].status = RRSOFT_IDLE;
}
 
static void RRSOFT_task_end(LEVEL l, PID p)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);

  /* Mark the activation counter as "skip" so late activations of the
     dead task are ignored. */
  lev->nact[p] = -1;

  /* we delete the reactivation timer */
  if (lev->periodic[p]) {
    event_delete(lev->reactivation_timer[p]);
    lev->reactivation_timer[p] = -1;
  }

  /* then, we insert the task descriptor back into the free queue */
  proc_table[p].status = FREE;
  iq_insertlast(p,&freedesc);
}
 
static void RRSOFT_task_sleep(LEVEL l, PID p)
{
  RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);

  /* Discard any saved activations (but keep -1, the "skip" marker). */
  if (lev->nact[p] >= 0) lev->nact[p] = 0;

  /* we delete the reactivation timer */
  if (lev->periodic[p]) {
    event_delete(lev->reactivation_timer[p]);
    lev->reactivation_timer[p] = -1;
  }

  proc_table[p].status = SLEEP;
}
 
/* Guest hooks: the RRSOFT level cannot host guest tasks coming from
   other scheduling modules, so every guest entry point raises
   XINVALID_GUEST on the currently running task. */

static int RRSOFT_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }

static void RRSOFT_guest_detach(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RRSOFT_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RRSOFT_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RRSOFT_guest_activate(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RRSOFT_guest_insert(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RRSOFT_guest_extract(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RRSOFT_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RRSOFT_guest_end(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RRSOFT_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
 
/* Registration functions */
 
/*+ This init function installs the "main" task +*/
static void RRSOFT_call_main(void *l)
{
  LEVEL lev;
  PID p;
  NRT_TASK_MODEL m;
  void *mb;

  lev = (LEVEL)l;

  nrt_task_default_model(m);
  nrt_task_def_level(m,lev); /* with this we are sure that the task
                                arrives at the correct level */

  mb = ((RRSOFT_level_des *)level_table[lev])->multiboot;
  nrt_task_def_arg(m,mb);
  nrt_task_def_usemath(m);
  nrt_task_def_nokill(m);
  nrt_task_def_ctrl_jet(m);

  p = task_create("Main", __init__, (TASK_MODEL *)&m, NULL);

  if (p == NIL) {
    printk("\nPanic!!! can't create main task...\n");
    /* BUGFIX: the original fell through and called
       RRSOFT_task_activate(lev, NIL), indexing proc_table[-1]. */
    return;
  }

  RRSOFT_task_activate(lev,p);
}
 
 
/*+ Registration function:
    TIME slice                the slice for the Round Robin queue
    int createmain            1 if the level creates the main task, 0 otherwise
    struct multiboot_info *mb used if createmain is specified
    BYTE models               the task models accepted by the level +*/
void RRSOFT_register_level(TIME slice,
                           int createmain,
                           struct multiboot_info *mb,
                           BYTE models)
{
  LEVEL l;            /* the level that we register */
  RRSOFT_level_des *lev;  /* for readableness only */
  PID i;

  printk("RRSOFT_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor();

  /* alloc the space needed for the RRSOFT_level_des */
  lev = (RRSOFT_level_des *)kern_alloc(sizeof(RRSOFT_level_des));

  printk("    lev=%d\n",(int)lev);

  /* update the level_table with the new entry */
  level_table[l] = (level_des *)lev;

  /* fill the standard descriptor */
  strncpy(lev->l.level_name, RRSOFT_LEVELNAME, MAX_LEVELNAME);
  lev->l.level_code               = RRSOFT_LEVEL_CODE;
  lev->l.level_version            = RRSOFT_LEVEL_VERSION;

  lev->l.level_accept_task_model  = RRSOFT_level_accept_task_model;
  lev->l.level_accept_guest_model = RRSOFT_level_accept_guest_model;
  lev->l.level_status             = RRSOFT_level_status;
  lev->l.level_scheduler          = RRSOFT_level_scheduler;
  lev->l.level_guarantee          = RRSOFT_level_guarantee;

  lev->l.task_create              = RRSOFT_task_create;
  lev->l.task_detach              = RRSOFT_task_detach;
  lev->l.task_eligible            = RRSOFT_task_eligible;
  lev->l.task_dispatch            = RRSOFT_task_dispatch;
  lev->l.task_epilogue            = RRSOFT_task_epilogue;
  lev->l.task_activate            = RRSOFT_task_activate;
  lev->l.task_insert              = RRSOFT_task_insert;
  lev->l.task_extract             = RRSOFT_task_extract;
  lev->l.task_endcycle            = RRSOFT_task_endcycle;
  lev->l.task_end                 = RRSOFT_task_end;
  lev->l.task_sleep               = RRSOFT_task_sleep;

  lev->l.guest_create             = RRSOFT_guest_create;
  lev->l.guest_detach             = RRSOFT_guest_detach;
  lev->l.guest_dispatch           = RRSOFT_guest_dispatch;
  lev->l.guest_epilogue           = RRSOFT_guest_epilogue;
  lev->l.guest_activate           = RRSOFT_guest_activate;
  lev->l.guest_insert             = RRSOFT_guest_insert;
  lev->l.guest_extract            = RRSOFT_guest_extract;
  lev->l.guest_endcycle           = RRSOFT_guest_endcycle;
  lev->l.guest_end                = RRSOFT_guest_end;
  lev->l.guest_sleep              = RRSOFT_guest_sleep;

  /* fill the RRSOFT descriptor part: no pending activations, no
     reactivation timers, no periodic tasks yet */
  for (i = 0; i < MAX_PROC; i++) {
    lev->nact[i] = -1;
    NULL_TIMESPEC(&lev->reactivation_time[i]);
    lev->reactivation_timer[i] = -1;
    lev->periodic[i] = 0;
    lev->period[i] = 0;
  }

  iq_init(&lev->ready, &freedesc, 0);

  /* clamp the slice into the legal range */
  if (slice < RRSOFT_MINIMUM_SLICE) slice = RRSOFT_MINIMUM_SLICE;
  if (slice > RRSOFT_MAXIMUM_SLICE) slice = RRSOFT_MAXIMUM_SLICE;
  lev->slice = slice;

  lev->multiboot = mb;

  lev->models = models;

  /* defer main-task creation until the kernel reaches RUNLEVEL_INIT */
  if (createmain)
    sys_atrunlevel(RRSOFT_call_main,(void *) l, RUNLEVEL_INIT);
}
 
 
 
 
/shark/tags/rel_0_2/kernel/modules/ps.c
0,0 → 1,699
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: ps.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the aperiodic server PS (Polling Server)
 
when scheduling in background the flags field has the PS_BACKGROUND bit set
 
when scheduling a task because it is pointed by another task via shadows,
the task have to be extracted from the wait queue or the master level. To
check this we have to look at the activated field; it is != NIL if a task
is inserted into the master level. Only a task at a time can be inserted
into the master level.
 
The capacity of the server must be updated
- when scheduling a task normally
- when scheduling a task because it is pointed by a shadow
but not when scheduling in background.
 
When a task is extracted from the system no scheduling has to be done
until the task re-enters the system. To implement this, when a task
is extracted we block the background scheduling (the scheduling with the
master level is already blocked because the activated field is not
reset to NIL) using the PS_BACKGROUND_BLOCK bit.
 
nact[p] is -1 if the task set the activations to SKIP, >= 0 otherwise
 
Note that if the period event fires and there aren't any task to schedule,
the server capacity is set to 0. This is correct, but there is a subtle
variant: the server capacity may be set to 0 later because if at the
period end the running task have priority > than the server, the capacity
may be set to zero the first time the server become the highest priority
running task and there aren't task to serve. The second implementation
is more efficient but more complicated, because normally we don't know the
priority of the running task.
 
An implementation can be done in this way: when there are not task to
schedule, we does not set the lev->activated field to nil, but to a "dummy"
task that is inserted into the master level queue.
When the master level scheduler tries to schedule the "dummy" task (this is
the situation in which there are no tasks to schedule and the PS is the
task with the highest priority), it calls PS_task_eligible, which sets the
server capacity to 0, removes the dummy task from the queue with a guest_end
and asks for a reschedule.
 
Because this implementation is more complex than the first, I don't
implement it... see (*), near line 169, 497 and 524
 
 
Read PS.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/ps.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/*+ Status used in the level +*/
#define PS_WAIT APER_STATUS_BASE /*+ waiting the service +*/
 
/*+ the level redefinition for the Polling Server (PS) level +*/
typedef struct {
  level_des l;     /*+ the standard level descriptor          +*/

  /* The wcet are stored in the task descriptor's priority
     field, so no other fields are needed */

  int nact[MAX_PROC]; /*+ number of pending activations       +*/

  struct timespec lastdline; /*+ the last deadline assigned to
                                 a PS task                    +*/

  int Cs;          /*+ server capacity                        +*/
  int availCs;     /*+ server avail time                      +*/

  IQUEUE wait;     /*+ the wait queue of the PS               +*/
  PID activated;   /*+ the task inserted in another queue     +*/

  int flags;       /*+ the init flags...                      +*/

  bandwidth_t U;   /*+ the used bandwidth by the server       +*/
  int period;      /*+ the server period (microseconds)       +*/

  LEVEL scheduling_level; /*+ the master level that actually
                              schedules the activated task    +*/

} PS_level_des;
 
/* This static function activates the task pointed to by lev->activated:
   it inserts the task as a guest into the master level with a JOB model
   built from the server's current deadline and period. */
static __inline__ void PS_activation(PS_level_des *lev)
{
  PID p;             /* for readableness */
  JOB_TASK_MODEL j;  /* the guest model */
  LEVEL m;           /* the master level... only for readableness */

  p = lev->activated;
  m = lev->scheduling_level;
  job_task_default_model(j,lev->lastdline);
  job_task_def_period(j,lev->period);
  level_table[m]->guest_create(m,p,(TASK_MODEL *)&j);
  level_table[m]->guest_activate(m,p);
}
 
/* Periodic server event: advances the server deadline by one period,
   recharges the capacity, possibly activates a waiting task, and
   re-arms itself. */
static void PS_deadline_timer(void *a)
{
  PS_level_des *lev = (PS_level_des *)(level_table[(LEVEL)a]);

  ADDUSEC2TIMESPEC(lev->period, &lev->lastdline);

  /* recharge: a non-negative residual is replaced by a full capacity,
     a negative one (overrun via shadows) is paid back first */
  if (lev->availCs >= 0)
    lev->availCs = lev->Cs;
  else
    lev->availCs += lev->Cs;

  /* availCs may be <0 because a task executed via a shadow for too much
     time.  lev->activated == NIL only if the previous task finished and
     there was no other task to be put in the ready queue
     ... we are now activating the next task */
  if (lev->availCs > 0 && lev->activated == NIL) {
    if (iq_query_first(&lev->wait) != NIL) {
      lev->activated = iq_getfirst(&lev->wait);
      PS_activation(lev);
      event_need_reschedule();
    }
    else
      lev->availCs = 0; /* see note (*) at the begin of the file */
  }

  /* re-arm the timer at the new deadline */
  kern_event_post(&lev->lastdline, PS_deadline_timer, a);
}
 
static char *PS_status_to_a(WORD status)
{
  /* Map a PS-private status code to a printable string; codes below
     MODULE_STATUS_BASE belong to the generic kernel and are delegated
     to status_to_a(). */
  if (status < MODULE_STATUS_BASE)
    return status_to_a(status);

  if (status == PS_WAIT)
    return "PS_Wait";

  return "PS_Unknown";
}
 
 
static int PS_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
  /* The PS accepts only aperiodic SOFT tasks addressed either to any
     level or explicitly to this one; everything else is refused. */
  SOFT_TASK_MODEL *soft = (SOFT_TASK_MODEL *)m;

  if (m->pclass != SOFT_PCLASS && m->pclass != (SOFT_PCLASS | l))
    return -1;

  return (soft->periodicity == APERIODIC) ? 0 : -1;
}
 
static int PS_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
  /* The PS never hosts guest tasks. */
  return -1;
}
 
/* Render a boolean flag as a fixed-width (3 char) on/off string. */
static char *onoff(int i)
{
  return i ? "On " : "Off";
}
 
/* Debug dump of the level: guarantee mode, used bandwidth, the task
   currently forwarded to the master level, and the wait queue. */
static void PS_level_status(LEVEL l)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);
  PID p = iq_query_first(&lev->wait);

  kern_printf("On-line guarantee : %s\n",
              onoff(lev->flags & PS_ENABLE_GUARANTEE_EDF ||
                    lev->flags & PS_ENABLE_GUARANTEE_RM ));
  kern_printf("Used Bandwidth    : %u/%u\n",
              lev->U, MAX_BANDWIDTH);

  /* -1 is NIL here: print the activated task only if there is one */
  if (lev->activated != -1)
    kern_printf("Activated: Pid: %2d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n",
                lev->activated,
                proc_table[lev->activated].name,
                iq_query_timespec(lev->activated,&lev->wait)->tv_sec,
                iq_query_timespec(lev->activated,&lev->wait)->tv_nsec,
                lev->nact[lev->activated],
                PS_status_to_a(proc_table[lev->activated].status));

  while (p != NIL) {
    kern_printf("Pid: %2d Name: %10s Stat: %s\n",
                p,
                proc_table[p].name,
                PS_status_to_a(proc_table[p].status));
    p = iq_query_next(p, &lev->wait);
  }
}
 
static PID PS_level_scheduler(LEVEL l)
{
  /* the PS doesn't schedule anything by itself...
     the task it serves runs as a guest of an EDF (or similar)
     master level, which does the scheduling! */
  return NIL;
}
 
static PID PS_level_schedulerbackground(LEVEL l)
{
  /* the PS catches background (otherwise idle) time to execute
     aperiodic activities; it notes the background condition in the
     flags so the capacity-accounting functions skip the update */
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  lev->flags |= PS_BACKGROUND;

  /* background scheduling is suspended while a served task is
     extracted from the system (see PS_task_extract) */
  if (lev->flags & PS_BACKGROUND_BLOCK)
    return NIL;
  else
    return iq_query_first(&lev->wait);
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set...
   EDF variant: the server is accepted iff its bandwidth U fits in the
   remaining free bandwidth, which is then reduced. */
static int PS_level_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  if (*freebandwidth >= lev->U) {
    *freebandwidth -= lev->U;
    return 1;
  }
  else
    return 0;
}
 
/* RM variant of the on-line guarantee: requires a strict margin of
   RM_MINFREEBANDWIDTH on top of the server bandwidth. */
static int PS_level_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  if (*freebandwidth > lev->U + RM_MINFREEBANDWIDTH) {
    *freebandwidth -= lev->U;
    return 1;
  }
  else
    return 0;
}
 
static int PS_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  /* if PS_task_create is called, then the pclass must be a
     valid pclass (already filtered by accept_task_model). */
  SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m;

  /* nact == 0 means "count pending activations";
     nact == -1 means "skip activations while not sleeping" */
  if (s->arrivals == SAVE_ARRIVALS)
    lev->nact[p] = 0;
  else
    lev->nact[p] = -1;

  return 0; /* OK, also if the task cannot be guaranteed... */
}
 
static void PS_task_detach(LEVEL l, PID p)
{
  /* the PS level doesn't attach any dynamically allocated data to a
     task, so there is nothing to release here. */
}
 
static int PS_task_eligible(LEVEL l, PID p)
{
  return 0; /* if the task p is chosen, it is always eligible */
}
 
static void PS_task_dispatch(LEVEL l, PID p, int nostop)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);
  struct timespec ty;

  /* Two ways to get here: either p runs in background (then it is still
     in the wait queue and must be extracted), or p is the task forwarded
     to the master level (then the dispatch is delegated as a guest).
     Note that we can't check the status because the scheduler set it
     to EXE before calling task_dispatch; we have to check
     lev->activated != p instead. */
  if (lev->activated != p) {
    iq_extract(p, &lev->wait);
  }
  else {
    level_table[ lev->scheduling_level ]->
      guest_dispatch(lev->scheduling_level,p,nostop);
  }

  /* set the capacity timer so the kernel preempts the task when the
     residual server capacity runs out */
  if (!nostop) {
    TIMESPEC_ASSIGN(&ty, &schedule_time);
    ADDUSEC2TIMESPEC(lev->availCs,&ty);
    cap_timer = kern_event_post(&ty, capacity_timer, NULL);
  }
}
 
static void PS_task_epilogue(LEVEL l, PID p)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);
  struct timespec ty;
  TIME tx;

  /* update the server capacity: background execution is free, a
     served execution consumes capacity for the elapsed time since
     the last dispatch */
  if (lev->flags & PS_BACKGROUND)
    lev->flags &= ~PS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
  }

  /* check if the server capacity is finished... */
  if (lev->availCs < 0) {
    /* the server slice has finished... do the task_end!!!
       a first version of the module used the task_endcycle, but it was
       not conceptually correct because the task didn't stop because it
       finished all the work but because the server didn't have budget!
       So, if the task_endcycle is called, the task remains in the
       master level, and we can't wake it up if, for example, another
       task points its shadow to it!!! */
    if (lev->activated == p)
      level_table[ lev->scheduling_level ]->
        guest_end(lev->scheduling_level,p);
    iq_insertfirst(p, &lev->wait);
    proc_table[p].status = PS_WAIT;
    lev->activated = NIL;
  }
  else
    /* the task has been preempted. it returns into the ready queue (via
       guest_epilogue in the master level) or to the wait queue... */
    if (lev->activated == p) {
      level_table[ lev->scheduling_level ]->
        guest_epilogue(lev->scheduling_level,p);
    } else {
      iq_insertfirst(p, &lev->wait);
      proc_table[p].status = PS_WAIT;
    }
}
 
static void PS_task_activate(LEVEL l, PID p)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  /* the task is already active (being served or waiting): just save
     the activation, unless activations are skipped (nact == -1) */
  if (lev->activated == p || proc_table[p].status == PS_WAIT) {
    if (lev->nact[p] != -1)
      lev->nact[p]++;
  }
  else if (proc_table[p].status == SLEEP) {
    ll_gettime(TIME_EXACT, &proc_table[p].request_time);

    /* serve it immediately if the server is idle and has capacity,
       otherwise queue it */
    if (lev->activated == NIL && lev->availCs > 0) {
      lev->activated = p;
      PS_activation(lev);
    }
    else {
      iq_insertlast(p, &lev->wait);
      proc_table[p].status = PS_WAIT;
    }
  }
  else
  /* unexpected task state: trace and reject the activation */
  { kern_printf("PS_REJ%d %d %d %d ",p, proc_table[p].status, lev->activated, lev->wait.first);
    return; }

}
 
static void PS_task_insert(LEVEL l, PID p)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  /* The task re-enters the level (e.g. after a shadow extraction):
     re-enable background scheduling... */
  lev->flags &= ~PS_BACKGROUND_BLOCK;

  /* ...and mark that no task is forwarded to the master level.
     CONSISTENCY FIX: use NIL as everywhere else in the module
     (the original used the literal -1). */
  lev->activated = NIL;

  /* when we reinsert the task into the system, the server capacity
     is always 0 because nobody executes with the PS before... */
  iq_insertfirst(p, &lev->wait);
  proc_table[p].status = PS_WAIT;
}
 
static void PS_task_extract(LEVEL l, PID p)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  /* update the server capacity: drained while the task is outside */
  lev->availCs = 0;

  /* suspend background scheduling until the task re-enters
     (see the header note about PS_BACKGROUND_BLOCK) */
  lev->flags |= PS_BACKGROUND_BLOCK;

  /* if the task was forwarded to the master level, remove it there */
  if (lev->activated == p)
    level_table[ lev->scheduling_level ]->
      guest_end(lev->scheduling_level,p);
}
 
static void PS_task_endcycle(LEVEL l, PID p)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);
  struct timespec ty;
  TIME tx;

  /* update the server capacity (background execution is free) */
  if (lev->flags & PS_BACKGROUND)
    lev->flags &= ~PS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
  }

  /* remove p from wherever it is: master level or wait queue */
  if (lev->activated == p)
    level_table[ lev->scheduling_level ]->
      guest_end(lev->scheduling_level,p);
  else
    iq_extract(p, &lev->wait);

  /* pending activations: requeue the task, otherwise it sleeps */
  if (lev->nact[p] > 0)
  {
    lev->nact[p]--;
    iq_insertlast(p, &lev->wait);
    proc_table[p].status = PS_WAIT;
  }
  else
    proc_table[p].status = SLEEP;

  /* serve the next waiting task, if any */
  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated == NIL)
    lev->availCs = 0; /* see note (*) at the begin of the file */
  else
    PS_activation(lev);
}
 
static void PS_task_end(LEVEL l, PID p)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);
  struct timespec ty;
  TIME tx;

  /* update the server capacity (background execution is free) */
  if (lev->flags & PS_BACKGROUND)
    lev->flags &= ~PS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
  }

  /* if the dying task was forwarded to the master level, end it there */
  if (lev->activated == p)
    level_table[ lev->scheduling_level ]->
      guest_end(lev->scheduling_level,p);

  /* release the task descriptor */
  proc_table[p].status = FREE;
  iq_insertfirst(p,&freedesc);

  /* serve the next waiting task, if any */
  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated == NIL)
    lev->availCs = 0; /* see note (*) at the begin of the file */
  else
    PS_activation(lev);
}
 
static void PS_task_sleep(LEVEL l, PID p)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);
  struct timespec ty;
  TIME tx;

  /* update the server capacity (background execution is free) */
  if (lev->flags & PS_BACKGROUND)
    lev->flags &= ~PS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
  }

  /* discard pending activations (keep -1, the "skip" marker) */
  if (lev->nact[p] >= 0) lev->nact[p] = 0;

  /* remove p from wherever it is: master level or wait queue */
  if (lev->activated == p)
    level_table[ lev->scheduling_level ]->
      guest_end(lev->scheduling_level,p);
  else
    iq_extract(p, &lev->wait);

  proc_table[p].status = SLEEP;

  /* serve the next waiting task, if any */
  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated == NIL)
    lev->availCs = 0; /* see note (*) at the begin of the file */
  else
    PS_activation(lev);
}
 
 
/* Guest hooks: the PS level cannot host guest tasks coming from other
   scheduling modules, so every guest entry point raises
   XINVALID_GUEST on the currently running task. */

static int PS_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }

static void PS_guest_detach(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void PS_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void PS_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void PS_guest_activate(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void PS_guest_insert(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void PS_guest_extract(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void PS_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void PS_guest_end(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void PS_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
 
/*+ This init function installs the PS deadline timer: the first server
    deadline is one period from now, and PS_deadline_timer re-arms
    itself from then on. +*/
static void PS_dline_install(void *l)
{
  PS_level_des *lev = (PS_level_des *)(level_table[(LEVEL)l]);

  ll_gettime(TIME_EXACT,&lev->lastdline);
  ADDUSEC2TIMESPEC(lev->period, &lev->lastdline);

  kern_event_post(&lev->lastdline, PS_deadline_timer, l);
}
 
 
 
/*+ Registration function:
    int flags      the init flags ... see ps.h
    LEVEL master   the level that schedules the served task (e.g. EDF)
    int Cs         the server capacity (us)
    int per        the server period (us) +*/
void PS_register_level(int flags, LEVEL master, int Cs, int per)
{
  LEVEL l;           /* the level that we register */
  PS_level_des *lev; /* for readableness only */
  PID i;             /* a counter */

  printk("PS_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor();

  printk("  alloco descrittore %d %d\n",l,(int)sizeof(PS_level_des));

  /* alloc the space needed for the PS_level_des */
  lev = (PS_level_des *)kern_alloc(sizeof(PS_level_des));

  printk("    lev=%d\n",(int)lev);

  /* update the level_table with the new entry */
  level_table[l] = (level_des *)lev;

  /* fill the standard descriptor */
  strncpy(lev->l.level_name, PS_LEVELNAME, MAX_LEVELNAME);
  lev->l.level_code               = PS_LEVEL_CODE;
  lev->l.level_version            = PS_LEVEL_VERSION;

  lev->l.level_accept_task_model  = PS_level_accept_task_model;
  lev->l.level_accept_guest_model = PS_level_accept_guest_model;
  lev->l.level_status             = PS_level_status;

  /* the scheduler hook depends on whether background service is on */
  if (flags & PS_ENABLE_BACKGROUND)
    lev->l.level_scheduler        = PS_level_schedulerbackground;
  else
    lev->l.level_scheduler        = PS_level_scheduler;

  if (flags & PS_ENABLE_GUARANTEE_EDF)
    lev->l.level_guarantee        = PS_level_guaranteeEDF;
  else if (flags & PS_ENABLE_GUARANTEE_RM)
    lev->l.level_guarantee        = PS_level_guaranteeRM;
  else
    lev->l.level_guarantee        = NULL;

  lev->l.task_create              = PS_task_create;
  lev->l.task_detach              = PS_task_detach;
  lev->l.task_eligible            = PS_task_eligible;
  lev->l.task_dispatch            = PS_task_dispatch;
  lev->l.task_epilogue            = PS_task_epilogue;
  lev->l.task_activate            = PS_task_activate;
  lev->l.task_insert              = PS_task_insert;
  lev->l.task_extract             = PS_task_extract;
  lev->l.task_endcycle            = PS_task_endcycle;
  lev->l.task_end                 = PS_task_end;
  lev->l.task_sleep               = PS_task_sleep;

  lev->l.guest_create             = PS_guest_create;
  lev->l.guest_detach             = PS_guest_detach;
  lev->l.guest_dispatch           = PS_guest_dispatch;
  lev->l.guest_epilogue           = PS_guest_epilogue;
  lev->l.guest_activate           = PS_guest_activate;
  lev->l.guest_insert             = PS_guest_insert;
  lev->l.guest_extract            = PS_guest_extract;
  lev->l.guest_endcycle           = PS_guest_endcycle;
  lev->l.guest_end                = PS_guest_end;
  lev->l.guest_sleep              = PS_guest_sleep;

  /* fill the PS descriptor part */

  for (i=0; i<MAX_PROC; i++)
    lev->nact[i] = -1;

  lev->Cs = Cs;
  lev->availCs = 0;  /* no capacity until the first deadline event */

  lev->period = per;

  iq_init(&lev->wait, &freedesc, 0);
  lev->activated = NIL;

  /* NOTE(review): integer division (MAX_BANDWIDTH / per) truncates
     before multiplying by Cs, slightly underestimating U -- confirm
     this is the intended (conservative) rounding. */
  lev->U = (MAX_BANDWIDTH / per) * Cs;

  lev->scheduling_level = master;

  /* keep only the three PS_ENABLE_* bits */
  lev->flags = flags & 0x07;

  sys_atrunlevel(PS_dline_install,(void *) l, RUNLEVEL_INIT);
}
 
/* Public accessor: returns the bandwidth reserved by the PS level l,
   or 0 if l is not a PS level (checked via level code/version). */
bandwidth_t PS_usedbandwidth(LEVEL l)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);
  if (lev->l.level_code    == PS_LEVEL_CODE &&
      lev->l.level_version == PS_LEVEL_VERSION)
    return lev->U;
  else
    return 0;
}
 
/shark/tags/rel_0_2/kernel/modules/rr.c
0,0 → 1,422
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: rr.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the scheduling module RR (Round Robin)
 
Read rr.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/rr.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/*+ Status used in the level +*/
#define RR_READY MODULE_STATUS_BASE
 
/*+ the level redefinition for the Round Robin level +*/
typedef struct {
level_des l; /*+ the standard level descriptor +*/
 
IQUEUE ready; /*+ the ready queue +*/
 
int slice; /*+ the level's time slice +*/
 
struct multiboot_info *multiboot; /*+ used if the level have to insert
the main task +*/
} RR_level_des;
 
 
static char *RR_status_to_a(WORD status)
{
  /* Map an RR-private status code to a printable string; codes below
     MODULE_STATUS_BASE belong to the generic kernel and are delegated
     to status_to_a(). */
  if (status < MODULE_STATUS_BASE)
    return status_to_a(status);

  if (status == RR_READY)
    return "RR_Ready";

  return "RR_Unknown";
}
 
static int RR_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
  /* Accept NRT tasks addressed either to any level or to this one. */
  return (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l)) ? 0 : -1;
}
 
static int RR_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
  /* The RR level never hosts guest tasks. */
  return -1;
}
 
/* Debug dump of the level: slice length, the ready queue, plus any
   task owned by this level that is neither ready nor free. */
static void RR_level_status(LEVEL l)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);
  PID p = iq_query_first(&lev->ready);

  kern_printf("Slice: %d \n", lev->slice);

  while (p != NIL) {
    kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name,
                RR_status_to_a(proc_table[p].status));
    p = iq_query_next(p,&lev->ready);
  }

  for (p=0; p<MAX_PROC; p++)
    if (proc_table[p].task_level == l && proc_table[p].status != RR_READY
        && proc_table[p].status != FREE )
      kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name,
                  RR_status_to_a(proc_table[p].status));

}
 
 
/* This is not efficient but very fair :-)
   The need of all this stuff is because if a task executes a long time
   due to (shadow!) priority inheritance, then the task shall go to the
   tail of the queue many times... */
static PID RR_level_scheduler(LEVEL l)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);

  PID p;

  for (;;) {
    p = iq_query_first(&lev->ready);
    /* CONSISTENCY FIX: compare against NIL, as everywhere else in the
       module (the original used the literal -1). */
    if (p == NIL)
      return p;

    /* exhausted slice: recharge it and rotate the task to the tail,
       then look at the new queue head */
    if (proc_table[p].avail_time <= 0) {
      proc_table[p].avail_time += proc_table[p].wcet;
      iq_extract(p,&lev->ready);
      iq_insertlast(p,&lev->ready);
    }
    else
      return p;
  }
}
 
static int RR_level_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  /* the RR level always guarantees... the function is defined because
     there can be an aperiodic server at a level with less priority than
     the RR that needs a guarantee (e.g., a TBS server) */
  return 1;
}
 
 
static int RR_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);
  NRT_TASK_MODEL *nrt = (NRT_TASK_MODEL *)m;

  /* the task state is set to SLEEP by the general task_create;
     the only thing left to set is the capacity (slice) fields,
     taken from the model or defaulted from the level. */

  /* The wcet field is used as the slice length so that the capacity
     mechanism can account a task that consumes more than one
     timeslice. */

  if (nrt->slice) {
    proc_table[p].avail_time = nrt->slice;
    proc_table[p].wcet       = nrt->slice;
  }
  else {
    proc_table[p].avail_time = lev->slice;
    proc_table[p].wcet       = lev->slice;
  }
  /* enable capacity-based preemption for this task */
  proc_table[p].control |= CONTROL_CAP;

  return 0; /* OK */
}
 
static void RR_task_detach(LEVEL l, PID p)
{
  /* the RR level doesn't attach any new data to a task, so everything
     was handled in task_create; the generic task_create sets the task
     state to FREE.  Nothing to do here. */
}
 
static int RR_task_eligible(LEVEL l, PID p)
{
  return 0; /* if the task p is chosen, it is always eligible */
}
 
static void RR_task_dispatch(LEVEL l, PID p, int nostop)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);

  /* the task state has already been set to EXE by the scheduler();
     here we only remove the task from the ready queue.
     NB: we can't assume that p is the first task in the queue!!! */
  iq_extract(p, &lev->ready);
}
 
static void RR_task_epilogue(LEVEL l, PID p)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);

  /* check if the slice is finished and insert the task in the correct
     queue position: an exhausted slice is recharged and the task
     rotates to the tail of the ready queue */
  if (proc_table[p].avail_time <= 0) {
    proc_table[p].avail_time += proc_table[p].wcet;
    iq_insertlast(p,&lev->ready);
  }
  else
    /* avail_time is > 0, so the preempted task keeps the remainder of
       its slice and goes back to the head of the queue */
    iq_insertfirst(p,&lev->ready);

  proc_table[p].status = RR_READY;
}
 
static void RR_task_activate(LEVEL l, PID p)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);

  /* Test if we are trying to activate a non sleeping task    */
  /* Ignore this; the task is already active                  */
  if (proc_table[p].status != SLEEP)
    return;

  ll_gettime(TIME_EXACT, &proc_table[p].request_time);

  /* Insert the task at the tail of the ready queue */
  proc_table[p].status = RR_READY;
  iq_insertlast(p,&lev->ready);
}
 
static void RR_task_insert(LEVEL l, PID p)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);

  /* Similar to RR_task_activate, but we don't check what state the
     task is in and we don't set request_time: the task simply
     re-enters the level at the tail of the ready queue. */
  proc_table[p].status = RR_READY;
  iq_insertlast(p,&lev->ready);
}
 
static void RR_task_extract(LEVEL l, PID p)
{
  /* Extract the running task from the level:
     . it was already removed from the ready queue at dispatch time
     . the capacity event is removed by the generic kernel
     . the wcet (slice) needs no modification
     . the task state is set by the calling function

     So, we do nothing!!!
  */
}
 
static void RR_task_endcycle(LEVEL l, PID p)
{
  /* this function is equal to RR_task_extract, except that
     the task falls asleep... */
  proc_table[p].status = SLEEP;
}
 
/* End hook: the task terminates for good; recycle its descriptor. */
static void RR_task_end(LEVEL l, PID p)
{
//  RR_level_des *lev = (RR_level_des *)(level_table[l]);

  /* we insert the task descriptor in the global free queue */
  proc_table[p].status = FREE;
  iq_insertlast(p,&freedesc);
}
 
/* Sleep hook: only the status changes here.
   NOTE(review): assumes p was already removed from the ready queue
   (as done at dispatch time) — confirm against the generic kernel's
   calling convention for task_sleep. */
static void RR_task_sleep(LEVEL l, PID p)
{
  proc_table[p].status = SLEEP;
}
 
/* Guest calls are not supported by the RR level: every guest hook
   raises XINVALID_GUEST on the currently running task. */
static int RR_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }

static void RR_guest_detach(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RR_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RR_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RR_guest_activate(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RR_guest_insert(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RR_guest_extract(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RR_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RR_guest_end(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RR_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
/*+ This init function installs the "main" task +*/
static void RR_call_main(void *l)
{
  LEVEL lev;
  PID p;
  NRT_TASK_MODEL m;
  void *mb;

  lev = (LEVEL)l;

  nrt_task_default_model(m);
  nrt_task_def_level(m,lev); /* with this we are sure that the task arrives
                                to the correct level */

  mb = ((RR_level_des *)level_table[lev])->multiboot;
  nrt_task_def_arg(m,mb);
  nrt_task_def_usemath(m);
  nrt_task_def_nokill(m);
  nrt_task_def_ctrl_jet(m);

  p = task_create("Main", __init__, (TASK_MODEL *)&m, NULL);

  if (p == NIL) {
    kern_printf("\nPanic!!! can't create main task... errno =%d\n",errno);
    /* Bug fix: the original fell through and called
       RR_task_activate(lev, NIL), indexing proc_table with an invalid
       PID. Bail out instead of activating a non-existent task. */
    return;
  }

  RR_task_activate(lev,p);
}
 
 
/*+ Registration function:
TIME slice the slice for the Round Robin queue
int createmain 1 if the level creates the main task 0 otherwise
struct multiboot_info *mb used if createmain specified +*/
/* Registration function: allocates a level descriptor, fills in the
   standard level interface with the RR_* hooks, clamps the requested
   time slice into [RR_MINIMUM_SLICE, RR_MAXIMUM_SLICE], and optionally
   schedules the creation of the "main" task at RUNLEVEL_INIT. */
void RR_register_level(TIME slice,
                       int createmain,
                       struct multiboot_info *mb)
{
  LEVEL l;            /* the level that we register */
  RR_level_des *lev;  /* for readableness only */

  printk("RR_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor();

  /* alloc the space needed for the RR_level_des */
  lev = (RR_level_des *)kern_alloc(sizeof(RR_level_des));

  /* NOTE(review): pointer printed through an int cast — truncates on
     64-bit targets; debug output only */
  printk("    lev=%d\n",(int)lev);

  /* update the level_table with the new entry */
  level_table[l] = (level_des *)lev;

  /* fill the standard descriptor */
  strncpy(lev->l.level_name, RR_LEVELNAME, MAX_LEVELNAME);
  lev->l.level_code               = RR_LEVEL_CODE;
  lev->l.level_version            = RR_LEVEL_VERSION;

  lev->l.level_accept_task_model  = RR_level_accept_task_model;
  lev->l.level_accept_guest_model = RR_level_accept_guest_model;
  lev->l.level_status             = RR_level_status;
  lev->l.level_scheduler          = RR_level_scheduler;
  lev->l.level_guarantee          = RR_level_guarantee;

  lev->l.task_create              = RR_task_create;
  lev->l.task_detach              = RR_task_detach;
  lev->l.task_eligible            = RR_task_eligible;
  lev->l.task_dispatch            = RR_task_dispatch;
  lev->l.task_epilogue            = RR_task_epilogue;
  lev->l.task_activate            = RR_task_activate;
  lev->l.task_insert              = RR_task_insert;
  lev->l.task_extract             = RR_task_extract;
  lev->l.task_endcycle            = RR_task_endcycle;
  lev->l.task_end                 = RR_task_end;
  lev->l.task_sleep               = RR_task_sleep;

  lev->l.guest_create             = RR_guest_create;
  lev->l.guest_detach             = RR_guest_detach;
  lev->l.guest_dispatch           = RR_guest_dispatch;
  lev->l.guest_epilogue           = RR_guest_epilogue;
  lev->l.guest_activate           = RR_guest_activate;
  lev->l.guest_insert             = RR_guest_insert;
  lev->l.guest_extract            = RR_guest_extract;
  lev->l.guest_endcycle           = RR_guest_endcycle;
  lev->l.guest_end                = RR_guest_end;
  lev->l.guest_sleep              = RR_guest_sleep;

  /* fill the RR descriptor part */
  iq_init(&lev->ready, &freedesc, 0);

  /* clamp the slice into the allowed range */
  if (slice < RR_MINIMUM_SLICE) slice = RR_MINIMUM_SLICE;
  if (slice > RR_MAXIMUM_SLICE) slice = RR_MAXIMUM_SLICE;
  lev->slice = slice;

  lev->multiboot = mb;

  if (createmain)
    sys_atrunlevel(RR_call_main,(void *) l, RUNLEVEL_INIT);
}
 
 
/shark/tags/rel_0_2/kernel/modules/sem.c
0,0 → 1,724
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: sem.c,v 1.2 2002-11-11 08:32:07 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:32:07 $
------------
 
This file contains the Hartik 3.3.1 Semaphore functions
 
Author: Giuseppe Lipari
 
Semaphores:
this is the generalized version of the primitives signal & wait
In this case, the user can specify the number to inc/dec the
semaphore's counter. It is useful in the buffer management
(see port section)
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <stdarg.h>
#include <modules/sem.h>
#include <kernel/config.h>
#include <ll/ll.h>
#include <ll/string.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <kernel/trace.h>
#include <limits.h>
#include <fcntl.h>
 
 
/* Semaphore descriptor table.
   Free entries are linked through 'next' into the free_sem list.
   Named semaphores (sem_open) own a heap-allocated copy of their name;
   unnamed ones (sem_init) have name == NULL. */
static struct sem_des {
    char *name;     /* the name, for named semaphores (NULL otherwise) */
    int index;      /* an index for sem_open, containing the sem number */
    int count;      /* the semaphore counter */
    IQUEUE blocked; /* the queue of tasks blocked on this semaphore */
    int next;       /* next entry in the free-semaphore list */
    BYTE used;      /* 1 if the semaphore is used */
} sem_table[SEM_NSEMS_MAX];
 
 
/* Per-task wait state: this -IS- an extension to the proc_table!!! */
static struct {
    int decsem;  /* the decrement requested in sem_wait/sem_xwait */
    int sem;     /* the semaphore on which the process is blocked */
} sp_table[MAX_PROC];

static int free_sem; /* head of the free-semaphore list (NIL if empty) */
 
 
 
/*----------------------------------------------------------------------*/
/* Cancellation test for semaphores */
/*----------------------------------------------------------------------*/
 
/* Cancellation hook, called when task i is being killed.
   If the task is blocked on a semaphore (WAIT_SEM), undo the data
   structures set by sem_wait/sem_xwait so that when the task resumes
   inside the wait it will fall into task_testcancel and die.
   Returns 1 if the task was handled here, 0 otherwise. */
static int semwait_cancellation_point(PID i, void *arg)
{
  LEVEL l;

  if (proc_table[i].status == WAIT_SEM) {
    /* extract the process from the semaphore queue... */
    iq_extract(i,&sem_table[ sp_table[i].sem ].blocked);

    /* ...and hand it back to its scheduling level */
    l = proc_table[i].task_level;
    level_table[l]->task_insert(l,i);

    return 1;
  }

  return 0;
}
 
/*----------------------------------------------------------------------*/
/* Init the semaphoric structures                                       */
/*----------------------------------------------------------------------*/
void SEM_register_module(void)
{
  int idx;

  /* Reset every descriptor and chain it into the free list:
     each entry points to its successor, the last ends with NIL. */
  for (idx = 0; idx < SEM_NSEMS_MAX; idx++) {
    sem_table[idx].name  = NULL;
    sem_table[idx].index = idx;
    sem_table[idx].count = 0;
    iq_init(&sem_table[idx].blocked, &freedesc, 0);
    sem_table[idx].next  = idx + 1;
    sem_table[idx].used  = 0;
  }
  sem_table[SEM_NSEMS_MAX - 1].next = NIL;
  free_sem = 0;

  /* Tasks blocked on a semaphore must be killable: install the hook
     that unblocks them when they are cancelled. */
  register_cancellation_point(semwait_cancellation_point, NULL);
}
 
/*----------------------------------------------------------------------*/
/* Allocate a semaphoric descriptor and sets the counter to n */
/*----------------------------------------------------------------------*/
 
// the pshared parameter is NRQ for PSE52
/* Allocate a semaphore descriptor and set its counter to value.
   Returns 0 on success, -1 with errno set (EINVAL, ENOSPC) on error. */
int sem_init(sem_t *sem, int pshared, unsigned int value)
{
  if (value > SEM_VALUE_MAX) {
    /* Bug fix: the original did `return EINVAL;`, returning a positive
       error code directly; POSIX (and every other error path in this
       file) sets errno and returns -1. */
    errno = EINVAL;
    return -1;
  }

  kern_cli();
  *sem = free_sem;
  if (*sem != NIL) {
    /* pop the descriptor off the free list and initialize it */
    free_sem = sem_table[*sem].next;
    sem_table[*sem].name  = NULL;   /* unnamed semaphore */
    sem_table[*sem].count = value;
    iq_init(&sem_table[*sem].blocked, &freedesc, 0);
    sem_table[*sem].used  = 1;
  }
  else {
    errno = ENOSPC;
    kern_sti();
    return -1;
  }
  kern_sti();
  return 0;
}
 
/*----------------------------------------------------------------------*/
/* Frees a semaphores descriptor */
/*----------------------------------------------------------------------*/
/* Free a semaphore descriptor.
   Fails with EINVAL for an invalid/unused descriptor and with EBUSY
   when tasks are still blocked on it. Returns 0 on success, -1 on
   error with errno set. */
int sem_destroy(sem_t *sem)
{
  kern_cli();

  if (*sem < 0 || *sem >= SEM_NSEMS_MAX || !sem_table[*sem].used) {
    errno = EINVAL;
    kern_sti();
    return -1;
  }

  /* refuse to destroy a semaphore that still has waiters */
  if (sem_table[*sem].blocked.first != NIL) {
    errno = EBUSY;
    kern_sti();
    return -1;
  }

  /* put the descriptor back on the free list */
  sem_table[*sem].used = 0;
  sem_table[*sem].next = free_sem;
  free_sem = *sem;

  kern_sti();
  return 0;
}
 
/*----------------------------------------------------------------------*/
/* Allocate a named semaphore */
/*----------------------------------------------------------------------*/
 
// the pshared parameter is NRQ for PSE52
sem_t *sem_open(const char *name, int oflag, ...)
{
int i, j;
int found = 0;
mode_t m;
sem_t sem;
 
kern_cli();
 
for (i = 0; i < SEM_NSEMS_MAX; i++)
if (sem_table[i].used) {
if (strcmp(name, sem_table[i].name) == 0) {
found = 1;
break;
}
}
if (found) {
if (oflag == (O_CREAT | O_EXCL)) {
errno = EEXIST;
kern_sti();
return SEM_FAILED;
} else {
kern_sti();
return &sem_table[i].index;
}
} else {
if (!(oflag & O_CREAT)) {
errno = ENOENT;
kern_sti();
return SEM_FAILED;
} else {
va_list l;
 
va_start(l, oflag);
m = va_arg(l,mode_t);
j = va_arg(l, int);
va_end(l);
 
if (j > SEM_VALUE_MAX) {
errno = EINVAL;
kern_sti();
return SEM_FAILED;
}
 
sem = free_sem;
if (sem != -1) {
free_sem = sem_table[sem].next;
sem_table[sem].name = kern_alloc(strlen((char *)name)+1);
strcpy(sem_table[sem].name, (char *)name);
sem_table[sem].count = j;
iq_init(&sem_table[sem].blocked, &freedesc, 0);
sem_table[sem].used = 1;
kern_sti();
return &sem_table[sem].index;
}
else {
errno = ENOSPC;
kern_sti();
return SEM_FAILED;
}
}
}
}
 
/*----------------------------------------------------------------------*/
/* Frees a named semaphore */
/*----------------------------------------------------------------------*/
/* Close a named semaphore: free its name and recycle the descriptor.
   Returns 0 on success, -1 with errno = EINVAL on a bad descriptor. */
int sem_close(sem_t *sem)
{
  kern_cli();

  if (*sem < 0 || *sem >= SEM_NSEMS_MAX || !sem_table[*sem].used) {
    errno = EINVAL;
    kern_sti();
    return -1;
  }

  /* Bug fix: an unnamed semaphore (from sem_init) has name == NULL;
     the original called strlen/kern_free on it unconditionally, which
     dereferences NULL. Only a named semaphore owns heap memory. */
  if (sem_table[*sem].name != NULL) {
    kern_free(sem_table[*sem].name,strlen(sem_table[*sem].name)+1);
    sem_table[*sem].name = NULL;
  }
  sem_table[*sem].used = 0;
  sem_table[*sem].next = free_sem;
  free_sem = *sem;

  kern_sti();
  return 0;
}
 
/*----------------------------------------------------------------------*/
/* Unlink a named semaphore */
/*----------------------------------------------------------------------*/
int sem_unlink(const char *name)
{
int i;
int found = 0;
 
kern_cli();
 
for (i = 0; i < SEM_NSEMS_MAX; i++)
if (sem_table[i].used) {
if (strcmp(name, sem_table[i].name) == 0) {
found = 1;
}
}
 
if (found) {
kern_free(sem_table[i].name,strlen((char *)name)+1);
sem_table[i].used = 0;
sem_table[i].next = free_sem;
free_sem = i;
kern_sti();
return 0;
} else {
errno = ENOENT;
kern_sti();
return SEM_FAILED;
}
}
 
/*----------------------------------------------------------------------*/
/* Generic wait. If it is possible, decrements the sem counter of n, */
/* else blocks the task. */
/*----------------------------------------------------------------------*/
/* Wait on a semaphore: decrement the counter by 1 or block.
   This is a cancellation point. Returns 0 on success, -1 with
   errno = EINVAL on a bad descriptor.
   NOTE(review): the validity check runs before interrupts are
   disabled — confirm whether a concurrent sem_destroy can race it. */
int sem_wait(sem_t *s)
{
  struct sem_des *s1; /* It speeds up access */

  if (*s < 0 || *s >= SEM_NSEMS_MAX || !sem_table[*s].used) {
    errno = EINVAL;
    return -1;
  }

  /* honor a pending cancellation before committing to the wait */
  task_testcancel();

  proc_table[exec_shadow].context = kern_context_save();

  s1 = &sem_table[*s];

  /* Block if the counter is exhausted OR other tasks are already
     queued: a newcomer must not overtake earlier FIFO waiters. */
  if (s1->blocked.first != NIL || s1->count == 0) {
    /* We must block exec task   */
    LEVEL l;            /* for readableness only */
    TIME tx;            /* a dummy TIME for timespec operations */
    struct timespec ty; /* a dummy timespec for timespec operations */

    /* tracer stuff */
    trc_logevent(TRC_SEM_WAIT,s);

    /* SAME AS SCHEDULER... manage the capacity event and the load_info:
       charge the elapsed slice to the current task and cancel the
       pending capacity timer before leaving the CPU */
    ll_gettime(TIME_EXACT, &schedule_time);
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    proc_table[exec_shadow].avail_time -= tx;
    jet_update_slice(tx);
    if (cap_timer != NIL) {
      event_delete(cap_timer);
      cap_timer = NIL;
    }
    l = proc_table[exec_shadow].task_level;
    level_table[l]->task_extract(l,exec_shadow);

    /* we insert the task in the semaphore queue */
    proc_table[exec_shadow].status = WAIT_SEM;

    /* Prepare sem_table des... (sem_wait always decrements by 1) */
    sp_table[exec_shadow].decsem = 1;
    sp_table[exec_shadow].sem = *s;

    /* ...and put it in sem queue */
    iq_insertlast(exec_shadow,&s1->blocked);

    /* and finally we reschedule */
    exec = exec_shadow = -1;
    scheduler();
    kern_context_load(proc_table[exec_shadow].context);

    /* sem_wait is a cancellation point... re-test after waking up
       (the cancellation hook may have unblocked us to die) */
    task_testcancel();
  }
  else {
    /* fast path: counter available and no waiters */
    s1->count--;
    /* tracer stuff */
    trc_logevent(TRC_SEM_WAIT,s);
    kern_context_load(proc_table[exec_shadow].context);
  }

  return 0;
}
 
/*----------------------------------------------------------------------*/
/* Non-blocking wait */
/*----------------------------------------------------------------------*/
/* Non-blocking wait: decrement the counter by 1 if possible.
   Returns 0 on success; -1 with errno = EAGAIN when the semaphore is
   unavailable (counter 0 or waiters queued), or errno = EINVAL on a
   bad descriptor. */
int sem_trywait(sem_t *s)
{
  struct sem_des *s1; /* It speeds up access */

  if (*s < 0 || *s >= SEM_NSEMS_MAX || !sem_table[*s].used) {
    errno = EINVAL;
    return -1;
  }

  kern_cli();

  s1 = &sem_table[*s];

  /* tracer stuff */
  //trc_logevent(TRC_SEM_WAITNB,s);

  /* fail instead of blocking; checking blocked.first preserves FIFO
     fairness with respect to already-queued waiters */
  if (s1->blocked.first != NIL || s1->count == 0) {
    errno = EAGAIN;
    kern_sti();
    return -1;
  }
  else
    s1->count--;
  kern_sti();
  return 0;
}
 
 
/*----------------------------------------------------------------------*/
/* Generic wait. If it is possible, decrements the sem counter of n, */
/* else blocks the task. */
/*----------------------------------------------------------------------*/
/* Generalized wait: decrement the counter by n, or block until n units
   are available (wait == BLOCK); with wait == NON_BLOCK fail with
   EAGAIN instead of blocking. The blocking variant is a cancellation
   point. Returns 0 on success, -1 with errno set on error. */
int sem_xwait(sem_t *s, int n, int wait)
{
  struct sem_des *s1; /* It speeds up access */

  if (*s < 0 || *s >= SEM_NSEMS_MAX || !sem_table[*s].used) {
    errno = EINVAL;
    return -1;
  }

  /* We do not need to save context if we are sure we shall not block! */
  if (wait == NON_BLOCK)
    kern_cli();
  else
    proc_table[exec_shadow].context = kern_context_save();

  s1 = &sem_table[*s];

  /* The non blocking wait is really simple! */
  /* We do not suspend or schedule anything  */
  if (wait == NON_BLOCK) {
    /* tracer */
    //trc_logevent(TRC_SEM_WAITNB,s);

    /* not enough units, or FIFO waiters already queued: fail */
    if (s1->blocked.first != NIL || s1->count < n) {
      errno = EAGAIN;
      kern_sti();
      return -1;
    }
    else
      s1->count -= n;

    kern_sti();
    return 0;
  }
  /* The blocking wait is more complex... */
  else {
    /* the blocking wait is a cancellation point */
    task_testcancel();

    /* block if fewer than n units are available OR other tasks are
       already queued (newcomers must not overtake FIFO waiters) */
    if (s1->blocked.first != NIL || s1->count < n) {
      /* We must block exec task   */
      LEVEL l;            /* for readableness only */
      TIME tx;            /* a dummy TIME for timespec operations */
      struct timespec ty; /* a dummy timespec for timespec operations */

      /* tracer */
      trc_logevent(TRC_SEM_WAIT,s);

      /* SAME AS SCHEDULER... manage the capacity event and the
         load_info: charge the elapsed slice to the current task and
         cancel the pending capacity timer before leaving the CPU */
      ll_gettime(TIME_EXACT, &schedule_time);
      SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
      tx = TIMESPEC2USEC(&ty);
      proc_table[exec_shadow].avail_time -= tx;
      jet_update_slice(tx);
      if (cap_timer != NIL) {
        event_delete(cap_timer);
        cap_timer = NIL;
      }
      l = proc_table[exec_shadow].task_level;
      level_table[l]->task_extract(l,exec_shadow);
      /* we insert the task in the semaphore queue */
      proc_table[exec_shadow].status = WAIT_SEM;
      /* Prepare sem_table des... (remember the requested decrement) */
      sp_table[exec_shadow].decsem = n;
      sp_table[exec_shadow].sem = *s;
      /* ...and put it in sem queue */
      iq_insertlast(exec_shadow,&s1->blocked);
      /* and finally we reschedule */
      exec = exec_shadow = -1;
      scheduler();
      kern_context_load(proc_table[exec_shadow].context);
      /* sem_xwait is a cancellation point... re-test after waking up */
      task_testcancel();
    }
    else {
      /* fast path: enough units and no waiters */
      s1->count -= n;
      /* tracer */
      trc_logevent(TRC_SEM_WAIT,s);
      kern_context_load(proc_table[exec_shadow].context);
    }
  }

  return 0;
}
 
/*----------------------------------------------------------------------*/
/* Generic signal. It increments the sem counter of 1, and wakes one */
/* of the tasks that are blocked on the semaphore, if it is possible.The*/
/* semaphoric queue is a FIFO queue, in order to eliminate deadlocks */
/*----------------------------------------------------------------------*/
/* Signal: increment the counter by 1 and, if the first FIFO waiter can
   now be satisfied (its requested decrement fits the new count), wake
   it and trigger a reschedule. Callable from both task and interrupt
   context (the two branches below). Returns 0 on success, -1 with
   errno = EINVAL on a bad descriptor. */
int sem_post(sem_t *s)
{
  struct sem_des *s1; /* it speeds up access */
  int p;              /* idem  */
  LEVEL l;

  if (*s < 0 || *s >= SEM_NSEMS_MAX || !sem_table[*s].used) {
    errno = EINVAL;
    return -1;
  }

  // ugly patch to call a sem_post!!!
  /* interrupt context: no context switch here, just flag the need to
     reschedule and let the interrupt exit path do it */
  if (ll_ActiveInt()) {
    SYS_FLAGS f;
    f = kern_fsave();
    s1 = &sem_table[*s];
    s1->count ++;                  /* inc sem count */

    p = s1->blocked.first;
    if (p != NIL && sp_table[p].decsem <= s1->count) {
      /* Dec sem count */
      s1->count -= sp_table[p].decsem;
      /* Get task from blocked queue */
      iq_extract(p,&s1->blocked);
      l = proc_table[p].task_level;
      level_table[l]->task_insert(l,p);
      /* only a task can be awaken */
      /* Preempt if necessary */
      event_need_reschedule();
    }
    /* tracer */
    trc_logevent(TRC_SEM_SIGNAL,s);
    kern_frestore(f);
  }
  /* task context: save the context and run the scheduler directly */
  else {
    proc_table[exec].context = kern_context_save();
    s1 = &sem_table[*s];
    s1->count ++;                  /* inc sem count */
    p = s1->blocked.first;
    if (p != NIL && sp_table[p].decsem <= s1->count) {
      /* Dec sem count */
      s1->count -= sp_table[p].decsem;
      /* Get task from blocked queue */
      iq_extract(p,&s1->blocked);
      l = proc_table[p].task_level;
      level_table[l]->task_insert(l,p);
      /* only a task can be awaken */
      /* Preempt if necessary */
      scheduler();
    }
    /* tracer */
    trc_logevent(TRC_SEM_SIGNAL,s);
    kern_context_load(proc_table[exec_shadow].context);
  }

  return 0;
}
 
/*----------------------------------------------------------------------*/
/* Generic signal. It increments the sem counter of n, and wakes all the*/
/* tasks that are blocked on the semaphore, if it is possible. The */
/* semaphoric queue is a FIFO queue, in order to eliminate deadlocks */
/*----------------------------------------------------------------------*/
/* Generalized signal: increment the counter by n and wake, in FIFO
   order, every blocked task whose requested decrement fits the
   remaining count; reschedule only if at least one task was woken.
   Callable from both task and interrupt context. Returns 0 on
   success, -1 with errno = EINVAL on a bad descriptor. */
int sem_xpost(sem_t *s, int n)
{
  struct sem_des *s1; /* it speeds up access */
  int p;              /* idem  */
  int fl = 0;         /* a flag: set if at least one task was woken */
  LEVEL l;

  if (*s < 0 || *s >= SEM_NSEMS_MAX || !sem_table[*s].used) {
    errno = EINVAL;
    return -1;
  }

  // ugly patch to call a sem_post!!!
  /* interrupt context: flag the reschedule instead of calling the
     scheduler directly */
  if (ll_ActiveInt()) {
    SYS_FLAGS f;
    f = kern_fsave();
    s1 = &sem_table[*s];
    s1->count += n;                  /* inc sem count */
    p = s1->blocked.first;
    while (p != NIL && sp_table[p].decsem <= s1->count) {
      /* Dec sem count */
      s1->count -= sp_table[p].decsem;
      /* Get task from blocked queue */
      iq_extract(p,&s1->blocked);
      l = proc_table[p].task_level;
      level_table[l]->task_insert(l,p);
      /* Next task to wake */
      p = s1->blocked.first;
      fl = 1;
    }
    /* tracer */
    trc_logevent(TRC_SEM_SIGNAL,s);
    /* Preempt if necessary */
    if (fl) event_need_reschedule();
    kern_frestore(f);
  }
  /* task context: save the context and call the scheduler directly */
  else {
    proc_table[exec].context = kern_context_save();
    s1 = &sem_table[*s];
    s1->count += n;                  /* inc sem count */
    p = s1->blocked.first;
    while (p != NIL && sp_table[p].decsem <= s1->count) {
      /* Dec sem count */
      s1->count -= sp_table[p].decsem;
      /* Get task from blocked queue */
      iq_extract(p,&s1->blocked);
      l = proc_table[p].task_level;
      level_table[l]->task_insert(l,p);
      /* Next task to wake */
      p = s1->blocked.first;
      fl = 1;
    }
    /* tracer */
    trc_logevent(TRC_SEM_SIGNAL,s);
    /* Preempt if necessary */
    if (fl) scheduler();
    kern_context_load(proc_table[exec_shadow].context);
  }

  return 0;
}
 
/*----------------------------------------------------------------------*/
/* Getvalue returns the value of the semaphore (>=0). If someone is */
/* blocked on the semaphore, return the number of process blocked (<0) */
/*----------------------------------------------------------------------*/
/* Report the semaphore value in *sval: the counter (>= 0) when no task
   is blocked, otherwise minus the number of blocked tasks (< 0).
   Returns 0 on success, -1 with errno = EINVAL on a bad descriptor. */
int sem_getvalue(sem_t *sem, int *sval)
{
  PID p;

  if (*sem < 0 || *sem >= SEM_NSEMS_MAX || !sem_table[*sem].used) {
    errno = EINVAL;
    return -1;
  }

  kern_cli();

  if (iq_isempty(&sem_table[*sem].blocked))
    /* the sem is free: report the counter */
    *sval = sem_table[*sem].count;
  else {
    /* the sem is busy: count the waiters, negated */
    *sval = 0;
    p = iq_query_first(&sem_table[*sem].blocked);
    do {
      (*sval)--;
      p = iq_query_next(p, &sem_table[*sem].blocked);
    } while (p != NIL);
  }

  kern_sti();
  return 0;
}
 
 
/*----------------------------------------------------------------------*/
/* this function returns 1 if the task is blocked on a semaphore        */
/*----------------------------------------------------------------------*/
int isBlocked(PID i)
{
  return proc_table[i].status == WAIT_SEM;
}
 
/shark/tags/rel_0_2/kernel/modules/ss.c
0,0 → 1,1178
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: ss.c,v 1.3 2002-11-11 08:32:07 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:07 $
------------
 
This file contains the aperiodic Sporadic Server (SS).
 
Note: in the following, server capacity and server budget are used as
synonyms.
 
When scheduling in background the flags field has the SS_BACKGROUND bit set
 
When scheduling a task because it is pointed by another task via shadows,
the task have to be extracted from the wait queue or the master level. To
check this we have to look at the activated field; it is != NIL if a task
is inserted into the master level. Only a task at a time can be inserted
into the master level.
 
The capacity of the server must be updated
- when scheduling a task normally
- when scheduling a task because it is pointed by a shadow
but not when scheduling in background.
 
When a task is extracted from the system no scheduling has to be done
until the task reenter into the system. To implement this, when a task
is extracted we block the background scheduling (the scheduling with the
master level is already blocked because the activated field is not
reset to NIL) using the SS_BACKGROUNDBLOCK bit.
 
nact[p] is -1 if the task set the activations to SKIP, >= 0 otherwise
 
In contrast to the classic SS scheme, the activation happens when
a task makes a create request while there is positive budget (instead of
becoming active when there is a running task with priority higher than or
equal to the server).
So the replenish time is established at task arrival time. Replenish time
is calculated as usual: activation time + server period.
When the server exhausts its budget, it becomes inactive until a
replenishment occurs.

When a task ends its computation and there are no tasks to schedule or,
again, the server budget ends, a replenish amount is posted so that, when
the replenish time fires, the server budget will be updated. The replenish
amount is determined by how much time the tasks have run.
The replenish amount does not take into account periods during which tasks
handled by SS are preempted.
 
There are two models used to handle a task running inside a critical
section (owning a mutex): the "nostop" model and the "stop" model.
Using the "nostop" model, a task that runs inside a critical section is
not stopped when the server exhausts its budget. This is done so that
higher priority tasks waiting for the mutex are not blocked until the
replenish time occurs. When this happens the server capacity becomes
negative and the replenish amount takes into account the negative budget
part.
With the "stop" model the running task is always suspended when the server
budget ends. If the suspended task owns a mutex shared with a higher
priority task, the latter cannot run until the mutex is released. The
higher priority task must wait at least up to the next replenish time,
when the server budget will be refilled and the suspended task runs again.

Using the "nostop" model, SS can use more bandwidth than its assigned
capacity (due to negative budgets). So, when calculating the guarantee,
the longest critical section of all tasks handled by SS must be considered.
SS can be used either with EDF or RM master level.
 
Read SS.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <stdlib.h>
#include <modules/ss.h>
#include <ll/stdio.h>
#include <ll/string.h>
 
#include <ll/sys/ll/event.h>
 
#include <kernel/const.h>
#include <kernel/model.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/* For debugging purpose */
//#define DEBUG 1
 
/*+ Status used in the level +*/
#define SS_WAIT APER_STATUS_BASE /*+ waiting the service +*/
 
/*+ Some useful macros +*/
#define BACKGROUND_ON (lev->flags & SS_BACKGROUND)
 
extern struct event *firstevent;
 
/*+ the level redefinition for the Sporadic Server +*/
typedef struct {
  level_des l;     /*+ the standard level descriptor          +*/

  /* The wcet are stored in the task descriptor's priority
     field, so no other fields are needed                     */

  int nact[MAX_PROC]; /*+ number of pending activations       +*/

  struct timespec lastdline; /*+ the last deadline assigned to
                                 a SS task                    +*/

  int Cs;          /*+ server capacity                        +*/
  int availCs;     /*+ server avail time                      +*/
  int period;      /*+ Server period +*/
  bandwidth_t U;   /*+ the used bandwidth by the server       +*/

  IQUEUE wait;     /*+ the wait queue of the SS               +*/
  PID activated;   /*+ the task inserted in another queue     +*/

  int flags;       /*+ the init flags...                      +*/


  LEVEL scheduling_level;

  /* the replenishment queue is a fixed-size circular array of
     amounts (see ssq_inslast/ssq_getfirst below) */
  int replenishment[SS_MAX_REPLENISH]; /*+ contains replenish amounts +*/
  int rfirst,rlast;                    /*+ first and last valid replenish
                                           in replenish queue +*/
  int rcount;                          /*+ queued replenishments +*/

  int replenish_amount;    /*+ partial replenishments before post +*/
  ss_status server_active; /*+ Is server active? +*/

} SS_level_des;
 
/*+ function prototypes +*/
void SS_level_status(LEVEL l);
static void SS_replenish_timer(void *arg);
/*-------------------------------------------------------------------*/
 
/*** Utility functions ***/
 
 
/* These are for dinamic queue. **Disabled** */
#if 0
/* These routines are not tested, be carefull */
 
/*+ SS local memory allocator.
Can be used for performance optimization.
The interface is the same of kern_alloc() +*/
void inline * ss_alloc(DWORD b) {
/* Now simply wraps to standard kernel alloc */
return kern_alloc(b);
}
 
void ssq_inslast(LEVEL l, replenishq *elem) {
 
SS_level_des *lev = (SS_level_des *) level_table[l];
 
if(lev->rqueue_last == NULL) { /* empty queue */
lev->rqueue_last=elem;
lev->rqueue_first=elem;
return;
}
elem->next = NULL;
lev->rqueue_last->next = elem;
lev->rqueue_last = elem;
}
 
replenishq *ssq_getfirst(LEVEL l) {
 
SS_level_des *lev = (SS_level_des *) level_table[l];
replenishq *tmp;
if(lev->rqueue_first == NULL) { /* empty queue */
return 0;
}
tmp = lev->rqueue_first;
lev->rqueue_first = tmp->next;
if(lev->rqueue_first == NULL) { /* no more elements */
lev->rqueue_last = NULL;
}
tmp->next = NULL; /* to remove dangling pointer */
return tmp;
}
#endif
 
/* For queue implemented with array.
SS_MAX_REPLENISH array size assumed */
 
/*+ Insert an element at tail of replenish queue
LEVEL l module level
int amount element to insert
 
RETURNS:
0 seccesfull insertion
NIL no more space for insertion +*/
/* Append a replenish amount at the tail of the circular replenish
   queue; returns 0 on success, NIL when the queue is full. */
static inline int ssq_inslast (LEVEL l, int amount) {

  SS_level_des *lev = (SS_level_des *)(level_table[l]);

#ifdef DEBUG
  kern_printf("insl ");
#endif

  if (lev->rcount == SS_MAX_REPLENISH) {
    return NIL; /* no more space in the queue */
  }

  /* store at the tail and wrap the index (circular array) */
  lev->replenishment[lev->rlast++] = amount;
  lev->rlast %= SS_MAX_REPLENISH;
  lev->rcount++;
#ifdef DEBUG
  printf_xy(0,0,WHITE,"%d",lev->rcount);
#endif

  return 0;
}
 
/*+ Get first element from replenish queue
LEVEL l module level
 
RETURS:
extracted element
NIL on empty queue +*/
/* Pop the oldest replenish amount from the head of the circular
   replenish queue; returns the amount, or NIL on an empty queue. */
static inline int ssq_getfirst (LEVEL l) {

  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  int tmp;

#ifdef DEBUG
  kern_printf("getf ");
#endif

  if (lev->rcount == 0) {
    return NIL; /* empty queue */
  }
  /* read from the head and wrap the index (circular array) */
  tmp = lev->replenishment[lev->rfirst++];
  lev->rfirst %= SS_MAX_REPLENISH;
  lev->rcount--;
#ifdef DEBUG
  printf_xy(0,0,WHITE,"%d",lev->rcount);
#endif
  return tmp;
}
 
/*+ Enquire for empty replenish queue
    LEVEL l   module level

    RETURNS:
      0  queue is not empty
      1  queue is empty              +*/
static inline int ssq_isempty (LEVEL l) {

  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  return lev->rcount == 0;
}
 
/*+ Set replenish amount for budget used during task execution.
    Posts the accumulated replenish_amount into the replenish queue and
    deactivates the server; raises XINVALID_SS_REPLENISH if the queue
    is full or the server was not active (both are internal errors).
    LEVEL l   module level                                          +*/
static inline void SS_set_ra(LEVEL l)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  /* replenish must be set when the server is still active */
  if(lev->server_active == SS_SERVER_ACTIVE) {
    lev->server_active = SS_SERVER_NOTACTIVE;
    if(ssq_inslast(l, lev->replenish_amount) == NIL) {
      kern_printf("SS: no more space to post replenishment\n");
      kern_printf("You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
      SS_level_status(l);
      kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
#ifdef DEBUG
      sys_abort(-1);
      exit(-1);
#endif
    }
    lev->replenish_amount = 0;
  }
  else {
    /* internal inconsistency: a replenish was posted while the server
       was not active */
    kern_printf("SS not active when posting R.A.\n");
    SS_level_status(l);
    kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
#ifdef DEBUG
    sys_abort(-1);
    exit(-1);
#endif
  }
}
/* ------------------------------------------------------------------ */
 
/* This static function activates the task pointed by lev->activated:
   it creates and activates the task as a guest (JOB_TASK_MODEL) in the
   master scheduling level, with the current server deadline/period. */
static inline void SS_activation(SS_level_des *lev)
{
  /* those two defines are for readableness */
  PID p;
  LEVEL m;

  JOB_TASK_MODEL j;          /* the guest model */
//  struct timespec ty;

#ifdef DEBUG
  kern_printf("SS_acti ");
#endif

  p = lev->activated;
  m = lev->scheduling_level;

#if 0
  /* (disabled) alternative scheme: set the replenish time here,
     on activation, if the server was not already active */
  /* if server is active, replenish time already set */
  if (lev->server_active == SS_SERVER_NOTACTIVE) {
     lev->server_active = SS_SERVER_ACTIVE;
     /* set replenish time */
     TIMESPEC_ASSIGN(&ty, &proc_table[p].request_time);
     ADDUSEC2TIMESPEC(lev->period, &ty);
     TIMESPEC_ASSIGN(&lev->lastdline, &ty);
#ifdef DEBUG
     kern_printf("RT:%d.%d ",ty.tv_sec,ty.tv_nsec);
#endif
     kern_event_post(&ty, SS_replenish_timer, (void *) l);
  }
#endif

  /* insert the task as a guest in the master level, using the last
     server deadline and the server period */
  job_task_default_model(j,lev->lastdline);
  job_task_def_period(j,lev->period);
  level_table[m]->guest_create(m,p,(TASK_MODEL *)&j);
  level_table[m]->guest_activate(m,p);

#ifdef DEBUG
  kern_printf("PID:%p lastdl:%d.%d ",p,lev->lastdline.tv_sec,lev->lastdline.tv_nsec);
#endif
}
 
/*+
   Capacity-exhausted event handler.
   Before calling the generic capacity_timer, charge the time elapsed
   since the last capacity checkpoint to the server budget, post the
   corresponding replenish amount, and deactivate the server.
+*/
static void SS_capacity_timer(void *arg) {

  LEVEL l = (LEVEL)arg;
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec ty;
  int tx;

#ifdef DEBUG
  kern_printf("SS_captim ");
#endif

  /* set replenish amount */
  /* task was running while budget ends */
  lev->server_active = SS_SERVER_NOTACTIVE;
  /* time consumed since the last checkpoint */
  SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
  tx = TIMESPEC2USEC(&ty);
  lev->availCs -= tx;
  /* the queued amount includes any partial replenishment accumulated
     before this event */
  if(ssq_inslast(l, tx+lev->replenish_amount) == NIL) {
    kern_printf("SS: no more space to post replenishment\n");
    kern_printf("    You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
    SS_level_status(l);
    kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
#ifdef DEBUG
    sys_abort(-1);
    exit(-1);
#endif
  }
  lev->replenish_amount = 0;
  /* hand over to the generic capacity-exhausted handling */
  capacity_timer(NULL);
}
 
/* Replenishment event handler: pops the first pending replenish amount
   from the queue, adds it back to the server budget (clamped to Cs),
   and, if the server regained capacity while idle, activates the first
   waiting task and posts the next replenishment event. */
static void SS_replenish_timer(void *arg)
{
LEVEL l = (LEVEL)arg;
SS_level_des *lev = (SS_level_des *)(level_table[l]);
struct timespec ty; /* next replenishment (absolute) time */
int amount; /* replenish amount popped from the queue */

#ifdef DEBUG
kern_printf("SS_reptim ");
#endif

/* availCs may be <0 because a task executed via a shadow for many time
lev->activated == NIL only if the prec task was finished and there
was not any other task to be put in the ready queue
... we are now activating the next task */
if ((amount = ssq_getfirst(l)) != NIL) {
lev->availCs += amount;
#ifdef DEBUG
kern_printf("AvaCs=%d ",lev->availCs);
#endif
/* never let the budget exceed the configured server capacity */
if (lev->availCs > lev->Cs) {
/* This should not be possible. I do so for robustness. */
lev->availCs = lev->Cs;
#ifdef DEBUG
kern_printf("SS warning: budget higher then server capacity. Set to Cs.");
#endif
}
if (lev->availCs <= 0) {
/* we can be here if nostop model is used */
#ifdef DEBUG
kern_printf("WARNING: SS has non positive capacity after replenish.");
#endif
/* if there isn't pending replenishment and server
is not active we must refull somehow.
Otherwise SS remains not active forever */
if(ssq_isempty(l) && lev->server_active == SS_SERVER_NOTACTIVE) {
lev->availCs = lev->Cs;
kern_printf("SS was full replenished due to irreversible non positive budget!!!\n");
kern_printf("You should review your time extimation for critical sections ;)\n");
}
}
}
else {
/* replenish queue is empty: the timer should never fire without a
   queued amount, so this is treated as a fatal inconsistency */
kern_printf("Replenish Timer fires but no Replenish Amount defined\n");
SS_level_status(l);
kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
#ifdef DEBUG
sys_abort(-1);
exit(-1);
#endif
}

/* budget available and no task currently served: serve the first
   waiting task, posting a new replenishment if the server was idle */
if (lev->availCs > 0 && lev->activated == NIL) {
if (iq_query_first(&lev->wait) != NIL) {
lev->activated = iq_getfirst(&lev->wait);
/* if server is active, replenish time already set */
if (lev->server_active == SS_SERVER_NOTACTIVE) {
lev->server_active = SS_SERVER_ACTIVE;
/* set replenish time: now + server period */
ll_gettime(TIME_EXACT, &ty);
ADDUSEC2TIMESPEC(lev->period, &ty);
TIMESPEC_ASSIGN(&lev->lastdline, &ty);
#ifdef DEBUG
kern_printf("RT:%d.%d ",ty.tv_sec,ty.tv_nsec);
#endif
kern_event_post(&ty, SS_replenish_timer, (void *) l);
}
SS_activation(lev);
event_need_reschedule();
}
}
}
 
/* Translate a task status code into a printable string; codes below
   MODULE_STATUS_BASE are delegated to the kernel's status_to_a(). */
static char *SS_status_to_a(WORD status)
{
  if (status < MODULE_STATUS_BASE)
    return status_to_a(status);

  if (status == SS_WAIT)
    return "SS_Wait";

  return "SS_Unknown";
}
 
 
/*-------------------------------------------------------------------*/
 
/*** Level functions ***/
 
 
/* Accept only soft-class task models whose periodicity is APERIODIC;
   everything else is refused with -1. */
static int SS_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
  SOFT_TASK_MODEL *soft;

#ifdef DEBUG
  kern_printf("SS_levacctm cl=%d ", m->pclass);
#endif

  if (m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l)) {
    soft = (SOFT_TASK_MODEL *)m;

    if (soft->periodicity == APERIODIC) {
#ifdef DEBUG
      kern_printf("AcceptApe ");
#endif
      return 0;
    }
#ifdef DEBUG
    kern_printf("NAcceptApe ");
#endif
  }
#ifdef DEBUG
  kern_printf("NAccept ");
#endif
  return -1;
}
 
/* The SS level never hosts guest tasks, so any guest model is refused. */
static int SS_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
  return -1;
}
 
/* Dump the server configuration and state (bandwidth, period, capacity,
   activated task, wait queue) on the console via kern_printf. */
void SS_level_status(LEVEL l)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec *dl;
  PID pid;

  kern_printf("On-line guarantee : %s\n",
              (lev->flags & SS_ENABLE_GUARANTEE_EDF ||
               lev->flags & SS_ENABLE_GUARANTEE_RM ) ? "On" : "Off");

  kern_printf("Used Bandwidth : %u/%u\n", lev->U, MAX_BANDWIDTH);
  kern_printf("Period : %d\n", lev->period);
  kern_printf("Capacity : %d\n", lev->Cs);
  kern_printf("Avail capacity : %d\n", lev->availCs);
  kern_printf("Server is %sactive\n",
              lev->server_active == SS_SERVER_NOTACTIVE ? "not " : "");
  kern_printf("Pending RAs : %d\n", lev->rcount);

  if (lev->activated != NIL) {
    /* query the deadline once instead of twice */
    dl = iq_query_timespec(lev->activated, &lev->wait);
    kern_printf("Activated: Pid: %d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n",
                lev->activated,
                proc_table[lev->activated].name,
                dl->tv_sec,
                dl->tv_nsec,
                lev->nact[lev->activated],
                SS_status_to_a(proc_table[lev->activated].status));
  }

  for (pid = iq_query_first(&lev->wait); pid != NIL;
       pid = iq_query_next(pid, &lev->wait))
    kern_printf("Pid: %d\tName: %10s\tStatus: %s\n",
                pid,
                proc_table[pid].name,
                SS_status_to_a(proc_table[pid].status));
}
 
/* SS never picks the running task itself: the served job is scheduled
   by the master (RM-like) level, so nothing is offered here. */
static PID SS_level_scheduler(LEVEL l)
{
#ifdef DEBUG
  kern_printf("SS_levsch ");
#endif
  return NIL;
}
 
/* Background scheduler: mark that the server is exploiting background
   time and offer the first waiting task, unless background execution
   is currently blocked. */
static PID SS_level_schedulerbackground(LEVEL l)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

#ifdef DEBUG
  kern_printf("SS_levschbg ");
#endif

  lev->flags |= SS_BACKGROUND;

  return (lev->flags & SS_BACKGROUND_BLOCK) ? NIL : iq_query_first(&lev->wait);
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
/* EDF on-line guarantee: accept the server iff its bandwidth U fits in
   the remaining free bandwidth, consuming it on success. */
static int SS_level_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

#ifdef DEBUG
  kern_printf("SS_levguarEDF ");
#endif

  if (*freebandwidth < lev->U)
    return 0;

  *freebandwidth -= lev->U;
  return 1;
}
 
/* RM on-line guarantee: like the EDF test, but requires a strictly
   larger margin of RM_MINFREEBANDWIDTH beyond the server bandwidth. */
static int SS_level_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

#ifdef DEBUG
  kern_printf("SS_levguarRM ");
#endif

  if (*freebandwidth <= lev->U + RM_MINFREEBANDWIDTH)
    return 0;

  *freebandwidth -= lev->U;
  return 1;
}
 
/*-------------------------------------------------------------------*/
 
/*** Task functions ***/
 
 
/* Per-task setup: nact[p] counts saved activations when SAVE_ARRIVALS
   is requested; -1 means extra arrivals are discarded. */
static int SS_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  /* if SS_task_create is called, the pclass has already been validated */
  SOFT_TASK_MODEL *soft = (SOFT_TASK_MODEL *)m;

#ifdef DEBUG
  kern_printf("SS_taskcre ");
#endif

  lev->nact[p] = (soft->arrivals == SAVE_ARRIVALS) ? 0 : -1;

  return 0; /* OK, also if the task cannot be guaranteed */
}
 
/* Nothing to release: SS allocates no dynamic per-task data, so task
   detach is a no-op. */
static void SS_task_detach(LEVEL l, PID p)
{
}
 
/* A task chosen by the scheduler is always eligible under SS. */
static int SS_task_eligible(LEVEL l, PID p)
{
  return 0;
}
 
/* Dispatch hook: if the server was idle, post the replenishment event
   (request time + period) before the task starts consuming budget;
   then either remove the task from the wait queue (first dispatch) or
   forward the dispatch to the master level (already a guest). Finally,
   unless running in background or nostop mode, arm the capacity timer
   to fire when the remaining budget availCs runs out. */
static void SS_task_dispatch(LEVEL l, PID p, int nostop)
{
SS_level_des *lev = (SS_level_des *)(level_table[l]);
struct timespec ty; /* replenishment / capacity expiry time */

#ifdef DEBUG
kern_printf("SS_tdi ");
#endif

TIMESPEC_ASSIGN(&ty, &schedule_time);
/* set replenish time */
if(!BACKGROUND_ON) {
if(lev->server_active == SS_SERVER_NOTACTIVE) {
lev->server_active = SS_SERVER_ACTIVE;
ADDUSEC2TIMESPEC(lev->period,&ty);
TIMESPEC_ASSIGN(&lev->lastdline, &ty);
#ifdef DEBUG
kern_printf("tdiPID:%d RT:%d.%d ",p,ty.tv_sec,ty.tv_nsec);
#endif
kern_event_post(&ty, SS_replenish_timer,(void *) l);
}
}
#ifdef DEBUG
if (nostop) kern_printf("NOSTOP!!! ");
#endif

/* there is at least one task ready inserted in an RM or similar level.
Note that we can't check the status because the scheduler sets it
to exe before calling task_dispatch.
We have to check lev->activated != p instead */
if (lev->activated != p) {
iq_extract(p, &lev->wait);
#ifdef DEBUG
kern_printf("extr task:%d ",p);
#endif
}
else {
#ifdef DEBUG
if (nostop) kern_printf("(gd status=%d)",proc_table[p].status);
#endif
level_table[lev->scheduling_level]->
guest_dispatch(lev->scheduling_level,p,nostop);
}

/* set capacity timer: fire after the residual budget (clamped to 0)
   measured from schedule_time */
if (!nostop && !BACKGROUND_ON) {
TIMESPEC_ASSIGN(&ty, &schedule_time);
// kern_printf("ty:%d.%d ",ty.tv_sec,ty.tv_nsec);
ADDUSEC2TIMESPEC((lev->availCs<=0 ? 0:lev->availCs),&ty);
// kern_printf("avCs:%d ty:%d.%d ",lev->availCs,ty.tv_sec,ty.tv_nsec);
/* stop the task if budget ends */
#ifdef DEBUG
kern_printf("PID:%d ST=%d.%d ",p,ty.tv_sec,ty.tv_nsec);
#endif
cap_timer = kern_event_post(&ty, SS_capacity_timer,(void *) l);
}
}
 
/* Epilogue hook, run when the task leaves the CPU: charge the executed
   time to the budget and the replenish amount (unless in background),
   then either suspend the task into the wait queue if the budget is
   exhausted, or hand it back to the master level / wait queue if it
   was merely preempted. */
static void SS_task_epilogue(LEVEL l, PID p) {

SS_level_des *lev = (SS_level_des *)(level_table[l]);
struct timespec ty; /* time executed since the last capacity event */
int tx; /* ty in microseconds */

#ifdef DEBUG
kern_printf("SS_tep ");
#endif
/* update the server capacity */
if (BACKGROUND_ON)
lev->flags &= ~SS_BACKGROUND;
else {
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
// kern_printf("ty:%d.%d ",ty.tv_sec,ty.tv_nsec);
tx = TIMESPEC2USEC(&ty);
lev->availCs -= tx;
// kern_printf("avCs:%d ty:%d.%d ",lev->availCs,ty.tv_sec,ty.tv_nsec);
lev->replenish_amount += tx;
#ifdef DEBUG
kern_printf("RA:%d ",lev->replenish_amount);
#endif
}

/* check if the server capacity is finished... */
if (lev->availCs <= 0) {
/* The server slice has finished... do the task_end!!!
A first version of the module used the task_endcycle, but it was
not conceptually correct because the task didn't stop because it
finished all the work, but because the server didn't have budget!
So, if the task_endcycle is called, the task remain into the
master level, and we can't wake him up if, for example, another
task point the shadow to it!!! */

/* set replenish amount: queue it and deactivate the server */
if(!(BACKGROUND_ON)) {
if(lev->server_active == SS_SERVER_ACTIVE) {
lev->server_active = SS_SERVER_NOTACTIVE;
if(ssq_inslast(l, lev->replenish_amount) == NIL) {
kern_printf("SS: no more space to post replenishment\n");
kern_printf("You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
SS_level_status(l);
kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
#ifdef DEBUG
sys_abort(-1);
exit(-1);
#endif
}
lev->replenish_amount = 0;
}
}

if (lev->activated == p)
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);

/* suspend the task in the wait queue until a replenishment arrives */
iq_insertfirst(p, &lev->wait);
proc_table[p].status = SS_WAIT;
lev->activated = NIL;
}
else {
/* The task has been preempted.
It returns into the ready queue or to the
wait queue by calling the guest_epilogue... */

if (lev->activated == p) { /* goes into ready queue */
level_table[ lev->scheduling_level ]->
guest_epilogue(lev->scheduling_level,p);
}
else { /* goes into wait queue */
iq_insertfirst(p, &lev->wait);
proc_table[p].status = SS_WAIT;
}
}
}
 
/* Activation hook: if the task is already served or waiting, just count
   the arrival (when SAVE_ARRIVALS is on); if it is sleeping, serve it
   immediately when the server is idle and has budget (posting the
   replenishment event if the server was inactive), otherwise queue it.
   Any other status is an invalid activation and is logged and ignored. */
static void SS_task_activate(LEVEL l, PID p)
{
SS_level_des *lev = (SS_level_des *)(level_table[l]);
struct timespec ty; /* replenishment time */

#ifdef DEBUG
kern_printf("SS_tacti ");
#endif

/* already active or queued: record the extra arrival if allowed */
if (lev->activated == p || proc_table[p].status == SS_WAIT) {
if (lev->nact[p] != -1) lev->nact[p]++;
}
else if (proc_table[p].status == SLEEP) {
ll_gettime(TIME_EXACT, &proc_table[p].request_time);
// kern_printf("-%d.%d- ",proc_table[p].request_time.tv_sec,proc_table[p].request_time.tv_nsec);
if (lev->activated == NIL && lev->availCs > 0) {
if(!BACKGROUND_ON) {
/* if server is active, replenish time already set */
if (lev->server_active == SS_SERVER_NOTACTIVE) {
lev->server_active = SS_SERVER_ACTIVE;
/* set replenish time: request time + period */
TIMESPEC_ASSIGN(&ty, &proc_table[p].request_time);
ADDUSEC2TIMESPEC(lev->period, &ty);
TIMESPEC_ASSIGN(&lev->lastdline, &ty);
#ifdef DEBUG
kern_printf("RT=%d.%d ",ty.tv_sec,ty.tv_nsec);
#endif
kern_event_post(&ty, SS_replenish_timer, (void *) l);
}
}
lev->activated = p;
SS_activation(lev);
}
else {
/* server busy or out of budget: park the task in the wait queue */
iq_insertlast(p, &lev->wait);
proc_table[p].status = SS_WAIT;
}
}
else {
/* invalid activation: log the state and ignore the request */
kern_printf("SS_REJ%d %d %d %d ",
p,
proc_table[p].status,
lev->activated,
lev->wait.first);
return;
}
}
 
/* Re-insert a previously extracted task into the level: background
   execution is unblocked and the task restarts from the wait queue,
   with no task marked as served (the server capacity is 0 here, since
   nobody executed through the SS in the meantime). */
static void SS_task_insert(LEVEL l, PID p)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

#ifdef DEBUG
  kern_printf("SS_tins ");
#endif

  lev->flags &= ~SS_BACKGROUND_BLOCK;
  lev->activated = NIL;

  proc_table[p].status = SS_WAIT;
  iq_insertfirst(p, &lev->wait);
}
 
/* Extract hook (e.g. task blocked outside the level): queue the pending
   replenish amount, zero the server budget, block background execution
   and, if the task was the one being served, end its guest job in the
   master level. */
static void SS_task_extract(LEVEL l, PID p)
{
SS_level_des *lev = (SS_level_des *)(level_table[l]);

#ifdef DEBUG
kern_printf("SS_textr ");
#endif

/* set replenish amount */
if(!(BACKGROUND_ON)) {
SS_set_ra(l);
}

/* clear the server capacity */
lev->availCs = 0;

lev->flags |= SS_BACKGROUND_BLOCK;

if (lev->activated == p)
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);
}
 
/* End-of-cycle hook: charge the executed time to the server budget and
   replenish amount (unless in background), end the guest job (or drop
   the task from the wait queue), then re-queue the task if a saved
   activation is pending or put it to sleep, and finally activate the
   next waiting task, posting the replenish amount if none remains. */
static void SS_task_endcycle(LEVEL l, PID p)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec ty; /* time executed since the last capacity event */
  int tx;             /* ty in microseconds */

#ifdef DEBUG
  kern_printf("SS_tendcy ");
#endif

  /* update the server capacity */
  if (BACKGROUND_ON)
    lev->flags &= ~SS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
    lev->replenish_amount += tx;
#ifdef DEBUG
    /* fix: the format string had two conversions but only one argument
       was passed; supply the PID as the first argument (cf. SS_task_end) */
    kern_printf("PID:%d RA=%d ",p,lev->replenish_amount);
#endif
  }

  if (lev->activated == p)
    level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);
  else
    iq_extract(p, &lev->wait);

  if (lev->nact[p] > 0) {
    /* a saved activation is pending: serve the task again later */
    lev->nact[p]--;
    iq_insertlast(p, &lev->wait);
    proc_table[p].status = SS_WAIT;
  }
  else {
    proc_table[p].status = SLEEP;
  }

  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated != NIL) {
    SS_activation(lev);
  }
  else {
    /* No more task to schedule; set replenish amount */
    if(!(BACKGROUND_ON)) {
      SS_set_ra(l);
    }
  }
}
 
/* Task-end hook: charge the executed time to the budget and replenish
   amount (unless in background), end the guest job if the task was
   being served, free the task descriptor, then activate the next
   waiting task or queue the replenish amount if none remains. */
static void SS_task_end(LEVEL l, PID p)
{
SS_level_des *lev = (SS_level_des *)(level_table[l]);
struct timespec ty; /* time executed since the last capacity event */
int tx; /* ty in microseconds */

#ifdef DEBUG
kern_printf("SS_tend ");
#endif

/* update the server capacity */
if (BACKGROUND_ON)
lev->flags &= ~SS_BACKGROUND;
else {
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
tx = TIMESPEC2USEC(&ty);
lev->availCs -= tx;
lev->replenish_amount += tx;
#ifdef DEBUG
kern_printf("PID:%d RA=%d ",p,lev->replenish_amount);
#endif
}

if (lev->activated == p)
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);

/* return the process descriptor to the free queue */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);

lev->activated = iq_getfirst(&lev->wait);
if (lev->activated != NIL) {
SS_activation(lev);
}
else {
if(!(BACKGROUND_ON)){
/* No more task to schedule; set replenish amount */
SS_set_ra(l);
}
}
}
 
/* Sleep hook: charge the executed time to the budget and replenish
   amount (unless in background), discard any saved activations, remove
   the task from the level (guest_end or wait-queue extract), put it to
   sleep, then activate the next waiting task or queue the replenish
   amount if none remains. */
static void SS_task_sleep(LEVEL l, PID p)
{
SS_level_des *lev = (SS_level_des *)(level_table[l]);
struct timespec ty; /* time executed since the last capacity event */
int tx; /* ty in microseconds */

#ifdef DEBUG
kern_printf("SS_tasksle ");
#endif

/* update the server capacity */
if (BACKGROUND_ON)
lev->flags &= ~SS_BACKGROUND;
else {
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
tx = TIMESPEC2USEC(&ty);
lev->availCs -= tx;
lev->replenish_amount += tx;
#ifdef DEBUG
kern_printf("PID:%d RA=%d ",p,lev->replenish_amount);
#endif
}

/* sleeping forgets all saved activations */
lev->nact[p] = 0;

if (lev->activated == p)
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);
else
iq_extract(p, &lev->wait);

proc_table[p].status = SLEEP;

lev->activated = iq_getfirst(&lev->wait);
if (lev->activated != NIL) {
SS_activation(lev);
}
else {
if(!(BACKGROUND_ON)){
/* No more task to schedule; set replenish amount */
SS_set_ra(l);
}
}
}
 
 
/*-------------------------------------------------------------------*/
 
/*** Guest functions ***/
 
 
/* SS doesn't handles guest tasks */
 
/* SS has no guest-task support: any guest call raises XINVALID_GUEST. */
static int SS_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
/* SS has no guest-task support: raise XINVALID_GUEST. */
static void SS_guest_detach(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* SS has no guest-task support: raise XINVALID_GUEST. */
static void SS_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* SS has no guest-task support: raise XINVALID_GUEST. */
static void SS_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* SS has no guest-task support: raise XINVALID_GUEST. */
static void SS_guest_activate(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* SS has no guest-task support: raise XINVALID_GUEST. */
static void SS_guest_insert(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* SS has no guest-task support: raise XINVALID_GUEST. */
static void SS_guest_extract(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* SS has no guest-task support: raise XINVALID_GUEST. */
static void SS_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* SS has no guest-task support: raise XINVALID_GUEST. */
static void SS_guest_end(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* SS has no guest-task support: raise XINVALID_GUEST. */
static void SS_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
/*-------------------------------------------------------------------*/
 
/*** Registration functions ***/
 
 
/*+ Registration function:
int flags the init flags ... see SS.h +*/
/* Register a Sporadic Server level.
   flags  : init flags (see ss.h): background mode, EDF/RM guarantee;
   master : the level (RM/EDF-like) that actually schedules the served job;
   Cs     : server capacity (budget), in microseconds (presumably — units
            follow availCs/period usage; TODO confirm against ss.h);
   per    : server replenishment period, same unit as Cs. */
void SS_register_level(int flags, LEVEL master, int Cs, int per)
{
LEVEL l; /* the level that we register */
SS_level_des *lev; /* for readableness only */
PID i; /* a counter */

/* request an entry in the level_table */
l = level_alloc_descriptor();
#ifdef DEBUG
kern_printf("Alloc des %d ",l);
#endif

/* alloc the space needed for the SS_level_des */
lev = (SS_level_des *)kern_alloc(sizeof(SS_level_des));

/* update the level_table with the new entry */
level_table[l] = (level_des *)lev;

/* fill the standard descriptor */
strncpy(lev->l.level_name, SS_LEVELNAME, MAX_LEVELNAME);
lev->l.level_code = SS_LEVEL_CODE;
lev->l.level_version = SS_LEVEL_VERSION;

lev->l.level_accept_task_model = SS_level_accept_task_model;
lev->l.level_accept_guest_model = SS_level_accept_guest_model;
lev->l.level_status = SS_level_status;

/* the background scheduler is installed only when requested */
if (flags & SS_ENABLE_BACKGROUND)
lev->l.level_scheduler = SS_level_schedulerbackground;
else
lev->l.level_scheduler = SS_level_scheduler;

/* select the on-line guarantee algorithm, if any */
if (flags & SS_ENABLE_GUARANTEE_EDF)
lev->l.level_guarantee = SS_level_guaranteeEDF;
else if (flags & SS_ENABLE_GUARANTEE_RM)
lev->l.level_guarantee = SS_level_guaranteeRM;
else
lev->l.level_guarantee = NULL;

lev->l.task_create = SS_task_create;
lev->l.task_detach = SS_task_detach;
lev->l.task_eligible = SS_task_eligible;
lev->l.task_dispatch = SS_task_dispatch;
lev->l.task_epilogue = SS_task_epilogue;
lev->l.task_activate = SS_task_activate;
lev->l.task_insert = SS_task_insert;
lev->l.task_extract = SS_task_extract;
lev->l.task_endcycle = SS_task_endcycle;
lev->l.task_end = SS_task_end;
lev->l.task_sleep = SS_task_sleep;

lev->l.guest_create = SS_guest_create;
lev->l.guest_detach = SS_guest_detach;
lev->l.guest_dispatch = SS_guest_dispatch;
lev->l.guest_epilogue = SS_guest_epilogue;
lev->l.guest_activate = SS_guest_activate;
lev->l.guest_insert = SS_guest_insert;
lev->l.guest_extract = SS_guest_extract;
lev->l.guest_endcycle = SS_guest_endcycle;
lev->l.guest_end = SS_guest_end;
lev->l.guest_sleep = SS_guest_sleep;

/* fill the SS descriptor part */

/* -1 means "arrivals not saved" until the task model says otherwise */
for (i=0; i<MAX_PROC; i++)
lev->nact[i] = -1;

lev->Cs = Cs;
lev->availCs = Cs;

lev->period = per;

iq_init(&lev->wait, &freedesc, 0);
lev->activated = NIL;

/* reserved bandwidth U = Cs/per scaled to MAX_BANDWIDTH */
lev->U = (MAX_BANDWIDTH / per) * Cs;
lev->scheduling_level = master;
lev->flags = flags & 0x07;
/* This is superfluos. I do it for robustness */
for (i=0;i<SS_MAX_REPLENISH;lev->replenishment[i++]=0);
/* Initialize replenishment stuff */
lev->rfirst=0;
lev->rlast=0;
lev->rcount=0;
lev->replenish_amount=0;
lev->server_active=SS_SERVER_NOTACTIVE;
}
 
/* Return the bandwidth reserved by the server, or 0 when the level
   descriptor is not an SS level of the expected version. */
bandwidth_t SS_usedbandwidth(LEVEL l)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  if (lev->l.level_code != SS_LEVEL_CODE ||
      lev->l.level_version != SS_LEVEL_VERSION)
    return 0;

  return lev->U;
}
 
/* Return the residual server budget, or 0 when the level descriptor is
   not an SS level of the expected version. */
int SS_availCs(LEVEL l) {
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  if (lev->l.level_code != SS_LEVEL_CODE ||
      lev->l.level_version != SS_LEVEL_VERSION)
    return 0;

  return lev->availCs;
}
/shark/tags/rel_0_2/kernel/modules/tbs.c
0,0 → 1,592
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: tbs.c,v 1.3 2002-11-11 08:32:07 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:07 $
------------
 
This file contains the aperiodic server TBS (Total Bandwidth Server)
 
Read tbs.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/tbs.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/*+ 4 debug purposes +*/
#undef TBS_TEST
 
/*+ Status used in the level +*/
#define TBS_WCET_VIOLATED APER_STATUS_BASE+2 /*+ when wcet is finished +*/
#define TBS_WAIT APER_STATUS_BASE /*+ waiting the service +*/
 
/*+ task flags +*/
#define TBS_SAVE_ARRIVALS 1
 
/*+ the level redefinition for the Total Bandwidth Server level +*/
typedef struct {
level_des l; /*+ the standard level descriptor +*/

/* The wcet are stored in the task descriptor's priority
field. */

int nact[MAX_PROC]; /*+ used to record activations (SAVE_ARRIVALS) +*/
BYTE flag[MAX_PROC]; /*+ per-task flags (TBS_SAVE_ARRIVALS) +*/

struct timespec lastdline; /*+ the last deadline assigned to
a TBS task +*/

IQUEUE wait; /*+ the wait queue of the TBS +*/
PID activated; /*+ the task inserted in another queue +*/

int flags; /*+ the init flags... +*/

bandwidth_t U; /*+ the used bandwidth by the server +*/
/* server bandwidth expressed as the fraction band_num/band_den,
   used to compute deadlines from wcet */
int band_num;
int band_den;

LEVEL scheduling_level; /*+ master level that schedules the guest job +*/

} TBS_level_des;
 
 
/* Translate a task status code into a printable string; codes below
   MODULE_STATUS_BASE are delegated to the kernel's status_to_a(). */
static char *TBS_status_to_a(WORD status)
{
  if (status < MODULE_STATUS_BASE)
    return status_to_a(status);

  if (status == TBS_WCET_VIOLATED)
    return "TBS_Wcet_Violated";
  if (status == TBS_WAIT)
    return "TBS_Wait";

  return "TBS_Unknown";
}
 
#ifdef TESTG
#include "drivers/glib.h"
#endif
 
/* This static function activates the task pointed by lev->activated) */
/* It computes the TBS deadline d = max(request_time, last deadline) +
   wcet * (band_den/band_num), stores it in lev->lastdline, and inserts
   the task in the master level as a guest job with that deadline. */
static __inline__ void TBS_activation(TBS_level_des *lev)
{
PID p; /* for readableness */
JOB_TASK_MODEL j; /* the guest model */
TIME drel; /* the relative deadline of the task */
LEVEL m; /* the master level... only for readableness */

#ifdef TESTG
TIME x;
extern TIME starttime;
#endif

p = lev->activated;
/* we compute a suitable deadline for the task */
drel = (proc_table[p].wcet * lev->band_den) / lev->band_num;

/* the new deadline starts from max(request_time, previous deadline) */
if (TIMESPEC_A_GT_B(&proc_table[p].request_time, &lev->lastdline))
TIMESPEC_ASSIGN(&lev->lastdline, &proc_table[p].request_time );

ADDUSEC2TIMESPEC(drel, &lev->lastdline);

#ifdef TESTG
if (starttime) {
x = ((lev->lastdline.tv_sec*1000000+lev->lastdline.tv_nsec/1000)/5000 - starttime) + 20;
if (x<640)
grx_plot(x, 15, 7);
}
#endif

/* and we insert the task in another level */
m = lev->scheduling_level;
job_task_default_model(j,lev->lastdline);
level_table[m]->guest_create(m,p,(TASK_MODEL *)&j);
level_table[m]->guest_activate(m,p);

#ifdef TBS_TEST
kern_printf("TBS_activation: lastdline %ds %dns\n",lev->lastdline.tv_sec,lev->lastdline.tv_nsec);
#endif
}
 
/* This static function reclaims the unused time of the task p */
/* The unused budget (avail_time) is converted back into time through
   the server bandwidth and subtracted from the server's last assigned
   deadline, so the next job gets an earlier deadline. */
static __inline__ void TBS_bandwidth_reclaiming(TBS_level_des *lev, PID p)
{
TIME reclaimed; /* reclaimed time in microseconds */
struct timespec r, sos;

// kern_printf("%d ", proc_table[p].avail_time);
reclaimed = (proc_table[p].avail_time * lev->band_den) / lev->band_num;

/* build a timespec out of the reclaimed microseconds */
r.tv_nsec = (reclaimed % 1000000) * 1000;
r.tv_sec = reclaimed / 1000000;

SUBTIMESPEC(&lev->lastdline, &r, &sos);
TIMESPEC_ASSIGN(&lev->lastdline, &sos);

#ifdef TBS_TEST
kern_printf("TBS_bandwidth_reclaiming: lastdline %ds %dns, reclaimed %d, avail %d\n",
lev->lastdline.tv_sec, lev->lastdline.tv_nsec, reclaimed, proc_table[p].avail_time);
#endif
}
 
 
 
/* Accept only soft-class task models that are aperiodic and declare a
   non-zero wcet; anything else is refused with -1. */
static int TBS_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
  SOFT_TASK_MODEL *soft;

  if (m->pclass != SOFT_PCLASS && m->pclass != (SOFT_PCLASS | l))
    return -1;

  soft = (SOFT_TASK_MODEL *)m;
  return (soft->wcet && soft->periodicity == APERIODIC) ? 0 : -1;
}
 
/* The TBS level never hosts guest tasks, so any guest model is refused. */
static int TBS_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
  return -1;
}
 
/* Render a boolean flag as a fixed-width "On "/"Off" label. */
static char *onoff(int i)
{
  return i ? "On " : "Off";
}
 
/* Dump the TBS configuration (wcet check, guarantee, bandwidth, last
   deadline), the currently activated task and the wait queue. */
static void TBS_level_status(LEVEL l)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);
  PID p = iq_query_first(&lev->wait);

  kern_printf("Wcet Check : %s\n",
              onoff(lev->flags & TBS_ENABLE_WCET_CHECK));
  kern_printf("On-line guarantee : %s\n",
              onoff(lev->flags & TBS_ENABLE_GUARANTEE));
  kern_printf("Used Bandwidth : %u/%u\n",
              lev->U, MAX_BANDWIDTH);
  kern_printf("Last deadline : %lds %ldns\n",lev->lastdline.tv_sec,
              lev->lastdline.tv_nsec);

  /* consistency fix: compare against NIL, as done everywhere else in
     the file, instead of the raw constant -1 */
  if (lev->activated != NIL)
    kern_printf("Activated: Pid: %2d Name: %10s Dl: %ld.%9ld nact: %d Stat: %s\n",
                lev->activated,
                proc_table[lev->activated].name,
                iq_query_timespec(lev->activated, &lev->wait)->tv_sec,
                iq_query_timespec(lev->activated, &lev->wait)->tv_nsec,
                lev->nact[lev->activated],
                TBS_status_to_a(proc_table[lev->activated].status));

  while (p != NIL) {
    kern_printf("Pid: %2d Name: %10s Stat: %s\n",
                p,
                proc_table[p].name,
                TBS_status_to_a(proc_table[p].status));
    p = iq_query_next(p, &lev->wait);
  }
}
 
/* TBS never picks the running task itself: the served job is scheduled
   by the master (EDF-like) level, so nothing is offered here. */
static PID TBS_level_scheduler(LEVEL l)
{
  return NIL;
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
/* On-line guarantee (enabled via TBS_ENABLE_GUARANTEE): accept the
   server iff its bandwidth U fits in the remaining free bandwidth,
   consuming it on success. */
static int TBS_level_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);

  if (*freebandwidth < lev->U)
    return 0;

  *freebandwidth -= lev->U;
  return 1;
}
 
/* Per-task setup: record the declared wcet, enable capacity control
   when the wcet check is active, and initialize the saved-activation
   bookkeeping. */
static int TBS_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);
  /* if TBS_task_create is called, the pclass has already been validated */
  SOFT_TASK_MODEL *soft = (SOFT_TASK_MODEL *)m;

  proc_table[p].wcet = soft->wcet;

  if (lev->flags & TBS_ENABLE_WCET_CHECK) {
    /* budget enforcement: start each job with a full wcet budget */
    proc_table[p].avail_time = soft->wcet;
    proc_table[p].control |= CONTROL_CAP;
  }

  lev->nact[p] = 0;
  if (soft->arrivals == SAVE_ARRIVALS)
    lev->flag[p] = TBS_SAVE_ARRIVALS;

  return 0; /* OK, also if the task cannot be guaranteed... */
}
 
/* Nothing to release: TBS allocates no dynamic per-task data, so task
   detach is a no-op. */
static void TBS_task_detach(LEVEL l, PID p)
{
}
 
/* A task chosen by the scheduler is always eligible under TBS. */
static int TBS_task_eligible(LEVEL l, PID p)
{
  return 0;
}
 
/* The served job lives as a guest in the master level: simply forward
   the dispatch there. */
static void TBS_task_dispatch(LEVEL l, PID p, int nostop)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);

  level_table[lev->scheduling_level]->
    guest_dispatch(lev->scheduling_level, p, nostop);
}
 
/* Epilogue hook: if the wcet budget is exhausted, raise XWCET_VIOLATION,
   remove the guest job, compensate the deadline for the (possibly
   negative) residual budget, and serve the next waiting task; otherwise
   the task was merely preempted and is handed back to the master level. */
static void TBS_task_epilogue(LEVEL l, PID p)
{
TBS_level_des *lev = (TBS_level_des *)(level_table[l]);

/* check if the wcet is finished... */
if ((lev->flags & TBS_ENABLE_WCET_CHECK) && proc_table[p].avail_time <= 0) {
/* if it is, raise a XWCET_VIOLATION exception */
kern_raise(XWCET_VIOLATION,p);
proc_table[p].status = TBS_WCET_VIOLATED;

/* the current task have to die in the scheduling queue, and another
have to be put in place... this code is identical to the
TBS_task_end */
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);

/* we reclaim an avail time that can be <0 due to the timer
approximations -> we have to postpone the deadline a little!
we can use the ADDUSEC2TIMESPEC because the time postponed is
less than 55ms */
ADDUSEC2TIMESPEC((-proc_table[p].avail_time * lev->band_den)
/ lev->band_num, &lev->lastdline);

#ifdef TBS_TEST
kern_printf("TBS_task_epilogue: Deadline posponed to %ds %dns\n",
lev->lastdline.tv_sec, lev->lastdline.tv_nsec);
#endif

/* serve the next waiting task, if any */
lev->activated = iq_getfirst(&lev->wait);
if (lev->activated != NIL)
TBS_activation(lev);
}
else
/* the task has been preempted. it returns into the ready queue by
calling the guest_epilogue... */
level_table[ lev->scheduling_level ]->
guest_epilogue(lev->scheduling_level,p);
}
 
/* Activation hook: a sleeping (or wcet-violated) task is served
   immediately when no task is activated, otherwise it is queued; an
   activation arriving while the task is already active is counted only
   when SAVE_ARRIVALS was requested, and silently dropped otherwise. */
static void TBS_task_activate(LEVEL l, PID p)
{
TBS_level_des *lev = (TBS_level_des *)(level_table[l]);

if (proc_table[p].status == SLEEP ||
proc_table[p].status == TBS_WCET_VIOLATED) {

ll_gettime(TIME_EXACT, &proc_table[p].request_time);
if (lev->activated == NIL) {
/* This is the first task in the level, so we activate it immediately */
lev->activated = p;
TBS_activation(lev);
}
else {
proc_table[p].status = TBS_WAIT;
iq_insertlast(p, &lev->wait);
}
}
else if (lev->flag[p] & TBS_SAVE_ARRIVALS)
lev->nact[p]++;
/* else
kern_printf("TBSREJ!!!");*/
}
 
/* Re-insertion is delegated to the master level where the guest job
   lives. */
static void TBS_task_insert(LEVEL l, PID p)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);

  level_table[lev->scheduling_level]->
    guest_insert(lev->scheduling_level, p);
}
 
/* Extraction is delegated to the master level where the guest job
   lives. */
static void TBS_task_extract(LEVEL l, PID p)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);

  level_table[lev->scheduling_level]->
    guest_extract(lev->scheduling_level, p);
}
 
/* End-of-cycle hook: terminate the guest job, reclaim the unused
   budget (pulling the server deadline back), refill the wcet budget,
   then either re-queue the task if saved activations are pending or
   put it to sleep, and finally serve the next waiting task. */
static void TBS_task_endcycle(LEVEL l, PID p)
{
TBS_level_des *lev = (TBS_level_des *)(level_table[l]);

/* a task activation is finished, but we are using a JOB_TASK_MODEL
that implements a single activation, so we have to call
the guest_end, that representsa single activation... */
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);

TBS_bandwidth_reclaiming(lev,p);

/* we reset the capacity counters... */
if (lev->flags & TBS_ENABLE_WCET_CHECK)
proc_table[p].avail_time = proc_table[p].wcet;

if (lev->nact[p]) {
// lev->nact[p] can be >0 only if the SAVE_ARRIVALS bit is set
lev->nact[p]--;
proc_table[p].status = TBS_WAIT;
iq_insertlast(p, &lev->wait);
}
else
proc_table[p].status = SLEEP;

lev->activated = iq_getfirst(&lev->wait);
if (lev->activated != NIL)
TBS_activation(lev);

}
 
/* Task-end hook: terminate the guest job, reclaim the unused budget,
   free the task descriptor and serve the next waiting task. */
static void TBS_task_end(LEVEL l, PID p)
{
TBS_level_des *lev = (TBS_level_des *)(level_table[l]);

level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);

TBS_bandwidth_reclaiming(lev,p);

/* return the process descriptor to the free queue */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);

lev->activated = iq_getfirst(&lev->wait);
if (lev->activated != NIL)
TBS_activation(lev);
}
 
/* Sleep hook: terminate the guest job, reclaim the unused budget,
   refill the wcet budget, discard saved activations, put the task to
   sleep and serve the next waiting task. */
static void TBS_task_sleep(LEVEL l, PID p)
{
TBS_level_des *lev = (TBS_level_des *)(level_table[l]);

/* a task activation is finished, but we are using a JOB_TASK_MODEL
that implements a single activation, so we have to call
the guest_end, that representsa single activation... */
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);

TBS_bandwidth_reclaiming(lev,p);

/* we reset the capacity counters... */
if (lev->flags & TBS_ENABLE_WCET_CHECK)
proc_table[p].avail_time = proc_table[p].wcet;

proc_table[p].status = SLEEP;

/* sleeping forgets all saved activations */
lev->nact[p] = 0;

lev->activated = iq_getfirst(&lev->wait);
if (lev->activated != NIL)
TBS_activation(lev);

}
 
/* TBS has no guest-task support: any guest call raises XINVALID_GUEST. */
static int TBS_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
/* TBS has no guest-task support: raise XINVALID_GUEST. */
static void TBS_guest_detach(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* TBS has no guest-task support: raise XINVALID_GUEST. */
static void TBS_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* TBS has no guest-task support: raise XINVALID_GUEST. */
static void TBS_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* TBS has no guest-task support: raise XINVALID_GUEST. */
static void TBS_guest_activate(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* TBS has no guest-task support: raise XINVALID_GUEST. */
static void TBS_guest_insert(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* TBS has no guest-task support: raise XINVALID_GUEST. */
static void TBS_guest_extract(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* TBS has no guest-task support: raise XINVALID_GUEST. */
static void TBS_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* TBS has no guest-task support: raise XINVALID_GUEST. */
static void TBS_guest_end(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
/* TBS has no guest-task support: raise XINVALID_GUEST. */
static void TBS_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
/*+ Registration function:
    int flags  the init flags ... see tbs.h
    LEVEL master  the level that will schedule the TBS guest jobs
    int num, den  server bandwidth expressed as the fraction num/den +*/
void TBS_register_level(int flags, LEVEL master, int num, int den)
{
  LEVEL l;            /* the level that we register */
  TBS_level_des *lev; /* for readableness only */
  PID i;              /* a counter */

  printk("TBS_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor();

  printk("    alloco descrittore %d %d\n",l,(int)sizeof(TBS_level_des));

  /* alloc the space needed for the TBS_level_des */
  lev = (TBS_level_des *)kern_alloc(sizeof(TBS_level_des));

  printk("    lev=%d\n",(int)lev);

  /* update the level_table with the new entry */
  level_table[l] = (level_des *)lev;

  /* fill the standard descriptor */
  strncpy(lev->l.level_name, TBS_LEVELNAME, MAX_LEVELNAME);
  lev->l.level_code               = TBS_LEVEL_CODE;
  lev->l.level_version            = TBS_LEVEL_VERSION;

  lev->l.level_accept_task_model  = TBS_level_accept_task_model;
  lev->l.level_accept_guest_model = TBS_level_accept_guest_model;
  lev->l.level_status             = TBS_level_status;
  lev->l.level_scheduler          = TBS_level_scheduler;

  /* the guarantee function is installed only if requested by the flags */
  if (flags & TBS_ENABLE_GUARANTEE)
    lev->l.level_guarantee        = TBS_level_guarantee;
  else
    lev->l.level_guarantee        = NULL;

  lev->l.task_create              = TBS_task_create;
  lev->l.task_detach              = TBS_task_detach;
  lev->l.task_eligible            = TBS_task_eligible;
  lev->l.task_dispatch            = TBS_task_dispatch;
  lev->l.task_epilogue            = TBS_task_epilogue;
  lev->l.task_activate            = TBS_task_activate;
  lev->l.task_insert              = TBS_task_insert;
  lev->l.task_extract             = TBS_task_extract;
  lev->l.task_endcycle            = TBS_task_endcycle;
  lev->l.task_end                 = TBS_task_end;
  lev->l.task_sleep               = TBS_task_sleep;

  /* guest calls are invalid for TBS (they all raise XINVALID_GUEST) */
  lev->l.guest_create             = TBS_guest_create;
  lev->l.guest_detach             = TBS_guest_detach;
  lev->l.guest_dispatch           = TBS_guest_dispatch;
  lev->l.guest_epilogue           = TBS_guest_epilogue;
  lev->l.guest_activate           = TBS_guest_activate;
  lev->l.guest_insert             = TBS_guest_insert;
  lev->l.guest_extract            = TBS_guest_extract;
  lev->l.guest_endcycle           = TBS_guest_endcycle;
  lev->l.guest_end                = TBS_guest_end;
  lev->l.guest_sleep              = TBS_guest_sleep;

  /* fill the TBS descriptor part */

  /* no pending activations, no flags for any task yet */
  for (i = 0; i < MAX_PROC; i++) {
    lev->nact[i] = 0;
    lev->flag[i] = 0;
  }

  /* no deadline assigned yet */
  NULL_TIMESPEC(&lev->lastdline);

  iq_init(&lev->wait, &freedesc, 0);
  lev->activated = NIL;

  /* server bandwidth U = num/den of the maximum */
  lev->U = (MAX_BANDWIDTH / den) * num;
  lev->band_num = num;
  lev->band_den = den;

  lev->scheduling_level = master;

  /* only the three lowest flag bits are meaningful */
  lev->flags = flags & 0x07;
}
 
/* return the bandwidth reserved by the TBS level l, or 0 if the level
   descriptor does not really belong to a TBS module */
bandwidth_t TBS_usedbandwidth(LEVEL l)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);

  /* guard: make sure this descriptor is a TBS one */
  if (lev->l.level_code != TBS_LEVEL_CODE ||
      lev->l.level_version != TBS_LEVEL_VERSION)
    return 0;

  return lev->U;
}
 
/* return the number of pending activations of task p at TBS level l,
   or -1 if the level descriptor is not a TBS one */
int TBS_get_nact(LEVEL l, PID p)
{
  TBS_level_des *lev = (TBS_level_des *)(level_table[l]);

  /* guard: make sure this descriptor is a TBS one */
  if (lev->l.level_code != TBS_LEVEL_CODE ||
      lev->l.level_version != TBS_LEVEL_VERSION)
    return -1;

  return lev->nact[p];
}
 
/shark/tags/rel_0_2/kernel/modules/dummy.c
0,0 → 1,312
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: dummy.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the Dummy scheduling module
 
Read dummy.h for further details.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
#include <modules/dummy.h>
#include <ll/ll.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/config.h>
#include <sys/types.h>
#include <modules/codes.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
 
 
/*+ the level redefinition for the Dummy level +*/
typedef struct {
  level_des l;     /*+ the standard level descriptor +*/

  PID dummy;       /*+ the dummy task... (-1 until dummy_create runs) +*/
} dummy_level_des;
 
 
/* accept the task model only if it is a DUMMY model (possibly tagged with
   this level) and the unique dummy task has not been created yet */
static int dummy_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
  dummy_level_des *lev = (dummy_level_des *)(level_table[l]);

  if ((m->pclass == DUMMY_PCLASS || m->pclass == (DUMMY_PCLASS | l))
      && lev->dummy == -1)
    return 0;
  else
    return -1;
}

/* the dummy level never hosts guest tasks */
static int dummy_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
  return -1;
}

static void dummy_level_status(LEVEL l)
{
  dummy_level_des *lev = (dummy_level_des *)(level_table[l]);

  kern_printf("dummy PID: %d\n", lev->dummy);
}
/* fix: removed the stray ';' after the closing brace above -- an empty
   file-scope declaration is not valid in strict ISO C */

/* the dummy task is always ready, so the scheduler just returns it */
static PID dummy_level_scheduler(LEVEL l)
{
  dummy_level_des *lev = (dummy_level_des *)(level_table[l]);
  return lev->dummy;
}
 
/* There is no guarantee on this level!!! -> the guarantee entry must be
   null:
   int (*level_guarantee)(LEVEL l, DWORD *freebandwidth); */

static int dummy_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
  /* the dummy level doesn't introduce any new field in the TASK_MODEL
     so, all initialization stuffs are done by the task_create.
     the task state is set at SLEEP by the general task_create */
  return 0; /* OK */
}

static void dummy_task_detach(LEVEL l, PID p)
{
  /* the dummy level doesn't introduce any new field in the TASK_MODEL
     so, all detach stuffs are done by the task_create
     The task state is set at FREE by the general task_create */
}

static int dummy_task_eligible(LEVEL l, PID p)
{
  return 0; /* if the task p is chosen, it is always eligible */
}
 
/* scheduler instrumentation hooks, defined elsewhere in the kernel */
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;

static void dummy_task_dispatch(LEVEL l, PID p, int nostop)
{
  /* nothing... the dummy hangs the cpu waiting for interrupts... */
  /* NOTE(review): this instrumentation block is intentionally disabled
     ("if (0)"); restore the testactive condition to re-enable it */
  if (0)//testactive)
  {
    s_stime[useds]= schedule_time;
    s_curr[useds] = -1;
    s_PID[useds]  = p;
    useds++;
  }

}
 
static void dummy_task_epilogue(LEVEL l, PID p)
{
  /* the dummy never really "runs out": just park it back in SLEEP */
  proc_table[p].status = SLEEP; /* Paranoia */
}

/* All the remaining task operations are invalid for the dummy task:
   each one prints a short marker (useful to identify which entry was
   hit) and raises an exception on the running task. */

static void dummy_task_activate(LEVEL l, PID p)
{ kern_printf("Dummy1"); kern_raise(XINVALID_DUMMY_OP,exec_shadow); }

static void dummy_task_insert(LEVEL l, PID p)
{ kern_printf("Dummy2"); kern_raise(XINVALID_DUMMY_OP,exec_shadow); }

static void dummy_task_extract(LEVEL l, PID p)
{ kern_printf("Dummy3"); kern_raise(XINVALID_DUMMY_OP,exec_shadow); }

static void dummy_task_endcycle(LEVEL l, PID p)
{ kern_printf("Dummy4"); kern_raise(XINVALID_DUMMY_OP,exec_shadow); }

static void dummy_task_end(LEVEL l, PID p)
{ kern_printf("Dummy5"); kern_raise(XINVALID_DUMMY_OP,exec_shadow); }

static void dummy_task_sleep(LEVEL l, PID p)
{ kern_printf("Dummy6"); kern_raise(XINVALID_DUMMY_OP,exec_shadow); }

/* Guest tasks are never accepted at this level, so every guest entry
   is invalid too. */

static int dummy_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_printf("Dummy8"); kern_raise(XINVALID_GUEST,exec_shadow); return 0; }

static void dummy_guest_detach(LEVEL l, PID p)
{ kern_printf("Dummy9"); kern_raise(XINVALID_GUEST,exec_shadow); }

static void dummy_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_printf("Dummy0"); kern_raise(XINVALID_GUEST,exec_shadow); }

static void dummy_guest_epilogue(LEVEL l, PID p)
{ kern_printf("Dummya"); kern_raise(XINVALID_GUEST,exec_shadow); }

static void dummy_guest_activate(LEVEL l, PID p)
{ kern_printf("Dummyb"); kern_raise(XINVALID_GUEST,exec_shadow); }

static void dummy_guest_insert(LEVEL l, PID p)
{ kern_printf("Dummyc"); kern_raise(XINVALID_GUEST,exec_shadow); }

static void dummy_guest_extract(LEVEL l, PID p)
{ kern_printf("Dummyd"); kern_raise(XINVALID_GUEST,exec_shadow); }

static void dummy_guest_endcycle(LEVEL l, PID p)
{ kern_printf("Dummye"); kern_raise(XINVALID_GUEST,exec_shadow); }

static void dummy_guest_end(LEVEL l, PID p)
{ kern_printf("Dummyf"); kern_raise(XINVALID_GUEST,exec_shadow); }

static void dummy_guest_sleep(LEVEL l, PID p)
{ kern_printf("Dummyg"); kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
/*+ Dummy task must be present & cannot be killed;
    it is the idle loop executed when no other task is ready +*/
static TASK dummy()
{
  /*
     It is possible to Halt the CPU & avoid consumption if idle
     cycle are intercepted with hlt instructions!
     It seems that some CPU have buggy hlt instruction or they
     have not it at all! So, if available, use the hlt facility!!
  */
#ifdef __HLT_WORKS__
  /* halt until the next interrupt: saves power while idle */
  for(;;) {
    hlt();
  }
#else
  /* plain busy loop for CPUs with a broken/missing hlt */
  for(;;);
#endif
}
 
/* Registration functions */
 
/*+ This init function installs the dummy task; it is run at
    RUNLEVEL_INIT (registered by dummy_register_level).
    l is the level index, smuggled through the void* argument. +*/
static void dummy_create(void *l)
{
  LEVEL lev;
  PID p;
  DUMMY_TASK_MODEL m;

  lev = (LEVEL)l;

  /* build the model of the (system, non-killable) dummy task */
  dummy_task_default_model(m);
  dummy_task_def_level(m,lev);
  dummy_task_def_system(m);
  dummy_task_def_nokill(m);
  dummy_task_def_ctrl_jet(m);

  ((dummy_level_des *)level_table[lev])->dummy = p =
    task_create("Dummy", dummy, &m, NULL);

  if (p == NIL) {
    /* bug fix: bail out here; the old code fell through and wrote
       proc_table[NIL].sigmask, an out-of-bounds access */
    printk("\nPanic!!! can't create dummy task...\n");
    return;
  }

  /* dummy must block all tasks... */
  proc_table[p].sigmask = 0xFFFFFFFF;
}
 
 
/*+ Registration function: registers the Dummy scheduling level (no
    parameters); the dummy task itself is created later, at
    RUNLEVEL_INIT, by dummy_create.
    NOTE(review): the old comment documented slice/createmain/mb
    parameters that this function does not take. +*/
void dummy_register_level()
{
  LEVEL l;              /* the level that we register */
  dummy_level_des *lev; /* for readableness only */

  printk("Entro in dummy_register_level\n");
  /* request an entry in the level_table */
  l = level_alloc_descriptor();

  /* alloc the space needed for the dummy_level_des */
  lev = (dummy_level_des *)kern_alloc(sizeof(dummy_level_des));

  /* update the level_table with the new entry */
  level_table[l] = (level_des *)lev;

  /* fill the standard descriptor */
  strncpy(lev->l.level_name, DUMMY_LEVELNAME, MAX_LEVELNAME);
  lev->l.level_code               = DUMMY_LEVEL_CODE;
  lev->l.level_version            = DUMMY_LEVEL_VERSION;

  lev->l.level_accept_task_model  = dummy_level_accept_task_model;
  lev->l.level_accept_guest_model = dummy_level_accept_guest_model;
  lev->l.level_status             = dummy_level_status;
  lev->l.level_scheduler          = dummy_level_scheduler;
  lev->l.level_guarantee          = NULL; /* No guarantee! */

  lev->l.task_create              = dummy_task_create;
  lev->l.task_detach              = dummy_task_detach;
  lev->l.task_eligible            = dummy_task_eligible;
  lev->l.task_dispatch            = dummy_task_dispatch;
  lev->l.task_epilogue            = dummy_task_epilogue;
  lev->l.task_activate            = dummy_task_activate;
  lev->l.task_insert              = dummy_task_insert;
  lev->l.task_extract             = dummy_task_extract;
  lev->l.task_endcycle            = dummy_task_endcycle;
  lev->l.task_end                 = dummy_task_end;
  lev->l.task_sleep               = dummy_task_sleep;

  lev->l.guest_create             = dummy_guest_create;
  lev->l.guest_detach             = dummy_guest_detach;
  lev->l.guest_dispatch           = dummy_guest_dispatch;
  lev->l.guest_epilogue           = dummy_guest_epilogue;
  lev->l.guest_activate           = dummy_guest_activate;
  lev->l.guest_insert             = dummy_guest_insert;
  lev->l.guest_extract            = dummy_guest_extract;
  lev->l.guest_endcycle           = dummy_guest_endcycle;
  lev->l.guest_end                = dummy_guest_end;
  lev->l.guest_sleep              = dummy_guest_sleep;

  /* the dummy process will be created at init_time.
     see also dummy_level_accept_model,dummy_create */
  lev->dummy = -1;

  printk("\tPosto dummy_create\n");

  /* schedule the actual task creation for boot time */
  sys_atrunlevel(dummy_create,(void *) l, RUNLEVEL_INIT);
}
/shark/tags/rel_0_2/kernel/modules/nop.c
0,0 → 1,306
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: nop.c,v 1.2 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
Binary Semaphores. see nop.h for more details...
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/nop.h>
 
#include <ll/ll.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <modules/codes.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/* The NOP resource level descriptor: NOP adds nothing on top of the
   generic mutex resource interface */
typedef struct {
  mutex_resource_des m;   /*+ the mutex interface +*/
} NOP_mutex_resource_des;


/* this is the structure normally pointed by the opt field in the
   mutex_t structure */
typedef struct {
  PID owner;              /* task owning the mutex, NIL if free */
  IQUEUE blocked;         /* FIFO queue of tasks blocked on the mutex */
} NOP_mutex_t;


/* Wait status for this library */
#define NOP_WAIT LIB_STATUS_BASE
 
 
/*+ print resource protocol statistics...+*/
static void NOP_resource_status(RLEVEL r)
{
  kern_printf("No status for NOP module\n");
}

/* the NOP module does not accept any resource model */
static int NOP_level_accept_resource_model(RLEVEL l, RES_MODEL *r)
{
  return -1;
}

static void NOP_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  /* never called: no resource model is ever accepted */
}

static void NOP_res_detach(RLEVEL l, PID p)
{
  /* nothing to do */
}

/* accept only mutex attributes of the NOP class (optionally tagged with
   this resource level) */
static int NOP_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a)
{
  if (a->mclass != NOP_MCLASS && a->mclass != (NOP_MCLASS | l))
    return -1;

  return 0;
}
 
/* initialize a NOP mutex: allocate and set up its private descriptor.
   Returns ENOMEM if the allocation fails, 0 otherwise. */
static int NOP_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  NOP_mutex_t *mx = (NOP_mutex_t *)kern_alloc(sizeof(NOP_mutex_t));

  /* control if there is enough memory; no control on init on a
     non-destroyed mutex */
  if (mx == NULL)
    return (ENOMEM);

  mx->owner = NIL;
  iq_init(&mx->blocked, &freedesc, 0);

  m->mutexlevel = l;
  m->opt        = (void *)mx;

  return 0;
}
 
 
/* destroy a NOP mutex: returns EBUSY if it is still locked.
   bug fix: the old code dereferenced m->opt *before* checking it for
   NULL (the later "if (m->opt)" shows NULL is possible), crashing on an
   uninitialized or already-destroyed mutex. */
static int NOP_destroy(RLEVEL l, mutex_t *m)
{
  NOP_mutex_t *p = (NOP_mutex_t *)m->opt;

  /* nothing to do for an uninitialized/already-destroyed mutex */
  if (!p)
    return 0;

  if (p->owner != NIL)
    return (EBUSY);

  kern_cli();
  kern_free(m->opt,sizeof(NOP_mutex_t));
  m->opt = NULL;
  kern_sti();

  return 0;
}
 
/* lock a NOP mutex: if free, take it; if owned by us, EDEADLK; if owned
   by another task, block FIFO on the mutex and reschedule. */
static int NOP_lock(RLEVEL l, mutex_t *m)
{
  NOP_mutex_t *p;

  kern_cli();

  p = (NOP_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    NOP_mutexattr_t a;
    NOP_mutexattr_default(a);
    NOP_init(l, m, &a);
    /* bug fix: reload the descriptor just created by NOP_init; the old
       code left p == NULL and dereferenced it below */
    p = (NOP_mutex_t *)m->opt;
    if (!p) {
      /* NOP_init failed (out of memory) */
      kern_sti();
      return (EINVAL);
    }
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_sti();
    return (EDEADLK);
  }

  if (p->owner != NIL) {           /* We must block exec task   */
    LEVEL tl;                      /* renamed from l: the old name
                                      shadowed the RLEVEL parameter */
    TIME tx;                       /* a dummy TIME for timespec operations */
    struct timespec ty;            /* a dummy timespec for timespec operations */

    proc_table[exec_shadow].context = kern_context_save();

    /* SAME AS SCHEDULER... manage the capacity event and the load_info */
    ll_gettime(TIME_EXACT, &schedule_time);
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    proc_table[exec_shadow].avail_time -= tx;
    jet_update_slice(tx);
    if (cap_timer != NIL) {
      event_delete(cap_timer);
      cap_timer = NIL;
    }

    tl = proc_table[exec_shadow].task_level;
    level_table[tl]->task_extract(tl,exec_shadow);

    /* we insert the task in the semaphore queue */
    proc_table[exec_shadow].status = NOP_WAIT;
    iq_insertlast(exec_shadow,&p->blocked);

    /* and finally we reschedule */
    exec = exec_shadow = -1;
    scheduler();
    kern_context_load(proc_table[exec_shadow].context);
  }
  else {
    /* the mutex is free, We can lock it! */
    p->owner = exec_shadow;
    kern_sti();
  }

  return 0;
}
 
/* try to lock a NOP mutex without blocking: EBUSY if already owned. */
static int NOP_trylock(RLEVEL l, mutex_t *m)
{
  NOP_mutex_t *p;

  kern_cli();

  p = (NOP_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    NOP_mutexattr_t a;
    NOP_mutexattr_default(a);
    NOP_init(l, m, &a);
    /* bug fix: reload the descriptor just created by NOP_init; the old
       code left p == NULL and dereferenced it below */
    p = (NOP_mutex_t *)m->opt;
    if (!p) {
      /* NOP_init failed (out of memory) */
      kern_sti();
      return (EINVAL);
    }
  }

  if (p->owner != NIL) {
    /* a task already owns the mutex */
    kern_sti();
    return (EBUSY);
  }
  else {
    /* the mutex is free, We can lock it! */
    p->owner = exec_shadow;
    kern_sti();
  }

  return 0;
}
 
/* unlock a NOP mutex: hand it to the first blocked task (FIFO) if any,
   then reschedule.
   NOTE(review): kern_sti() is called in the EPERM path although this
   function never calls kern_cli() -- confirm against the kernel's
   interrupt discipline (NOP_lock/NOP_trylock both cli first).
   NOTE(review): the RLEVEL parameter l is reused below as a task LEVEL;
   this works only because both are plain integer indexes. */
static int NOP_unlock(RLEVEL l, mutex_t *m)
{
  NOP_mutex_t *p;

  p = (NOP_mutex_t *)m->opt;
  if (!p)
    return (EINVAL);

  if (p->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    kern_sti();
    return (EPERM);
  }

  proc_table[exec_shadow].context = kern_context_save();

  /* the mutex is mine, pop the first task to extract; the new owner is
     the head of the blocked queue (NIL if nobody is waiting) */
  p->owner = iq_getfirst(&p->blocked);
  if (p->owner != NIL) {
    l = proc_table[p->owner].task_level;
    level_table[l]->task_insert(l,p->owner);
  }

  scheduler();
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}
 
/* register the NOP (plain binary semaphore) mutex module: allocate a
   resource descriptor, fill in the generic resource interface and the
   mutex operations, and publish it in resource_table. */
void NOP_register_module(void)
{
  RLEVEL r;                    /* the resource level we register */
  NOP_mutex_resource_des *des; /* for readableness only */

  printk("NOP_register_module\n");

  /* request an entry in the resource table */
  r = resource_alloc_descriptor();

  /* alloc the space needed for the NOP_mutex_resource_des */
  des = (NOP_mutex_resource_des *)kern_alloc(sizeof(NOP_mutex_resource_des));

  /* publish the new entry */
  resource_table[r] = (resource_des *)des;

  /* fill the resource_des descriptor */
  strncpy(des->m.r.res_name, NOP_MODULENAME, MAX_MODULENAME);
  des->m.r.res_code    = NOP_MODULE_CODE;
  des->m.r.res_version = NOP_MODULE_VERSION;
  des->m.r.rtype       = MUTEX_RTYPE;

  des->m.r.resource_status            = NOP_resource_status;
  des->m.r.level_accept_resource_model = NOP_level_accept_resource_model;
  des->m.r.res_register               = NOP_res_register;
  des->m.r.res_detach                 = NOP_res_detach;

  /* fill the mutex_resource_des descriptor */
  des->m.level_accept_mutexattr = NOP_level_accept_mutexattr;
  des->m.init    = NOP_init;
  des->m.destroy = NOP_destroy;
  des->m.lock    = NOP_lock;
  des->m.trylock = NOP_trylock;
  des->m.unlock  = NOP_unlock;
}
 
/shark/tags/rel_0_2/kernel/modules/trcudp.c
0,0 → 1,261
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/*
* Copyright (C) 2002 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* CVS : $Id: trcudp.c,v 1.3 2002-10-28 10:11:38 pj Exp $
*/
 
#include <ll/sys/types.h>
#include <ll/stdlib.h>
 
#include <kernel/func.h>
#include <kernel/mem.h>
#include <kernel/log.h>
 
#include <trace/types.h>
#include <trace/trace.h>
#include <trace/queues.h>
 
//#define DEBUG_TRCUDP

/* number of trace events that fit into one ~1500-byte UDP payload */
#define TRCUDP_MAXEVENTS (1500/sizeof(trc_event_t))
//#define TRCUDP_MAXEVENTS 10

/* Well... this file is very similar to trccirc.c! */

typedef struct TAGtrcudp_queue_t {
  /*+ size of the queue +*/
  int size;
  /*+ index of the next insertion into the queue +*/
  int index;
  /*+ index of the next item to write (if online_tracer activated) +*/
  int windex;
  /*+ number of events lost (if online_tracer activated) +*/
  long hoops;
  /*+ local and remote IP numbers +*/
  UDP_ADDR local, remote;
  /*+ unique number that identify the queue +*/
  int uniq;
  /*+ =1 when the system shuts down +*/
  int mustgodown;
  /* task model for the online tracer task (NULL -> default soft task) */
  TASK_MODEL *m;
  /*+ dummy, needed for creating a valid packet (dirty trick ;-):
      online_tracer writes the event count in the 2 bytes that
      precede table[windex] +*/
  short int dummy;
  /*+ events table (pre-C99 zero-length-array idiom for a flexible
      array member; allocated as sizeof(struct) + size*sizeof(event)) +*/
  trc_event_t table[0];
} trcudp_queue_t;
 
/* Periodic task that drains the circular event queue over UDP.
   Each cycle it sends at most TRCUDP_MAXEVENTS events in one packet,
   never wrapping inside a packet; when mustgodown is set it keeps
   flushing until the queue is empty, then exits. */
static TASK online_tracer(trcudp_queue_t *queue)
{
  int s;          /* the socket */
  int newwindex;  /* new write index after sending the packet */
  int n;          /* number of events to send */
  short int *pkt;

  s = udp_bind(&queue->local, NULL);
  for (;;) {
    if (queue->index<queue->windex) {
      /* writer has wrapped: drain up to the end of the table first */
      if (queue->windex+TRCUDP_MAXEVENTS < queue->size) {
        newwindex = queue->windex+TRCUDP_MAXEVENTS;
        n = TRCUDP_MAXEVENTS;
      } else {
        newwindex = 0;
        n = queue->size-queue->windex;
      }
    } else {
      /* normal case: drain up to the writer's index */
      if (queue->windex+TRCUDP_MAXEVENTS < queue->index) {
        newwindex = queue->windex+TRCUDP_MAXEVENTS;
        n = TRCUDP_MAXEVENTS;
      } else {
        newwindex = queue->index;
        n = queue->index-queue->windex;
      }
    }
    if (n) {
      /* set the number of events into the UDP packet. It works
         because the event entry before windex is always empty, or
         because we use the dummy field into the struct */
      pkt = ((short int *)(queue->table+queue->windex))-1;
      *pkt = (short int)n;
      udp_sendto(s,(char *)pkt,
                 n*sizeof(trc_event_t)+2,&queue->remote);
#ifdef DEBUG_TRCUDP
      printk(KERN_DEBUG "UDP: SEND %d events,"
             " index %d windex %d new %d!!!\n",n,
             queue->index, queue->windex, newwindex);
#endif
      queue->windex = newwindex;
    }
    if (queue->mustgodown) {
      /* shutting down: exit only once everything has been flushed */
      if (queue->windex == queue->index)
        break;
    }
    else
      task_endcycle();
  }

  return NULL;
}
 
 
/* Reserve the next free slot of the circular queue for a new trace
   event, or return NULL (counting it in hoops) when the queue is full
   or the system is shutting down.  The slot just before windex is
   always kept empty: online_tracer stores the packet length there. */
static trc_event_t *trcudp_get(trcudp_queue_t *queue)
{
  if (queue->mustgodown)
    return NULL;

  /* wrap-around case: writing the last slot is allowed only if the
     reader is not parked at slot 0 */
  if (queue->index==queue->size-1) {
    if (queue->windex==0) {
      queue->hoops++;
      return NULL;
    }
    queue->index=0;
    return &queue->table[queue->size-1];
  }
  /* normal case: refuse to catch up with the reader */
  if (queue->index+1==queue->windex) {
    queue->hoops++;
    return NULL;
  }
  return &queue->table[queue->index++];
}

/* nothing to do after an event has been written into its slot */
static int trcudp_post(trcudp_queue_t *queue)
{
  return 0;
}
 
static void trcudp_shutdown(trcudp_queue_t *queue);

/* Create a UDP trace queue: allocate the descriptor plus its event
   table in one chunk, wire the get/post callbacks into *p and register
   the shutdown hook.  Returns 0 on success, -1 on error. */
static int trcudp_create(trc_queue_t *p, TRC_UDP_PARMS *args)
{
  trcudp_queue_t *q;

  if (args==NULL) {
    printk(KERN_ERR "trcudp_create: you must specify a non-NULL parameter!");
    return -1;
  }

  /* one allocation: header followed by the flexible event table */
  q = (trcudp_queue_t*)kern_alloc(sizeof(trcudp_queue_t)+
                                  sizeof(trc_event_t)*args->size);
  if (q==NULL) {
    printk(KERN_ERR "trcudp_create: error during memory allocation!");
    return -1;
  }

  /* export the queue callbacks */
  p->get  = (trc_event_t*(*)(void*))trcudp_get;
  p->post = (int(*)(void*))trcudp_post;
  p->data = q;

  /* initialize the queue state */
  q->size       = args->size;
  q->windex     = 0;
  q->index      = 0;
  q->hoops      = 0;
  q->local      = args->local;
  q->remote     = args->remote;
  /* uniq initialized in trcudp_activate */
  q->mustgodown = 0;
  q->m          = args->model;
  /* dummy unused */

  /* AFTER exit because in that way we can hope to be back in text mode... */
  sys_atrunlevel((void (*)(void *))trcudp_shutdown, (void *)q, RUNLEVEL_AFTER_EXIT);
  return 0;
}
 
/* Activate the queue: remember its unique id and start the online
   tracer task, using either the caller-supplied task model or a
   default periodic soft task (250 ms period, 10 ms met/wcet). */
static int trcudp_activate(trcudp_queue_t *queue, int uniq)
{
  SOFT_TASK_MODEL model;
  TASK_MODEL *m;
  PID pid;

  queue->uniq=uniq;

  if (!queue->m) {
    /* no model supplied: build the default soft periodic system task */
    soft_task_default_model(model);
    soft_task_def_system(model);
    /* soft_task_def_notrace(model); Should we trace the tracer? */
    soft_task_def_periodic(model);
    soft_task_def_period(model,250000);
    soft_task_def_met(model,10000);
    soft_task_def_wcet(model,10000);
    /* soft_task_def_nokill(model);  NOOOOOOO!!!! */
    soft_task_def_arg(model,queue);
    m = (TASK_MODEL *)&model;
  }
  else {
    /* caller-supplied model: just attach the queue as the argument */
    m = queue->m;
    task_def_arg(*m,queue);
  }

  pid=task_create("trcUDP",online_tracer,m,NULL);
  if (pid==-1) {
    printk(KERN_ERR "can't start tracer online trcudp trace task");
  } else
    task_activate(pid);

  return 0;
}
 
/* Ask the online tracer to flush the queue and exit (checked in its
   main loop). */
static int trcudp_terminate(trcudp_queue_t *queue)
{
  queue->mustgodown = 1;
  return 0;
}

/* Shutdown hook (runs after exit): report how many events were lost. */
static void trcudp_shutdown(trcudp_queue_t *queue)
{
  printk(KERN_NOTICE "tracer: %li events lost into UDP queue %d",
         queue->hoops, queue->uniq);
}
 
/* Register the UDP queue type with the tracer core; returns the
   trc_register_queuetype() result (0 on success). */
int trc_register_udp_queue(void)
{
  int res = trc_register_queuetype(TRC_UDP_QUEUE,
                                   (int(*)(trc_queue_t*,void*))trcudp_create,
                                   (int(*)(void*,int))trcudp_activate,
                                   (int(*)(void*))trcudp_terminate
                                   );

  if (res!=0)
    printk(KERN_WARNING "can't register tracer trcudp queue");

  return res;
}
/shark/tags/rel_0_2/kernel/modules/cabs.c
0,0 → 1,315
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: cabs.c,v 1.2 2002-10-28 07:55:54 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:54 $
------------
 
Date: 2/7/96
 
File: Cabs.C
Translated by : Giuseppe Lipari
Revision: 1.1
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <modules/cabs.h>
 
#include <kernel/config.h>
#include <ll/ll.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <errno.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
/* "cab": contiene dei buffer per memorizzare messaggi */
/* "cab_data": buffer contenente il messaggio e informazioni relative */
/* "messaggio": si trova a partire da (*cab_data + 1) */
 
struct cab_data { /* struttura del buffer di cab */
struct cab_data *next; /* successivo buffer del cab */
unsigned short n_uso; /* processi che usano il buffer */
};
 
struct cab_desc { /* struttura descrittore di cab */
char name[MAX_CAB_NAME+1]; /* nome del CAB */
CAB next_cab_free; /* indice successivo cab libero */
BYTE busy; /* cab libero/occcupato */
char *mem_cab; /* memoria globale cab */
BYTE n_buf; /* numero dei buffer nel cab */
BYTE nfree; /* numero buffer liberi */
unsigned dim_mes; /* dimensione del messaggio */
struct cab_data *free; /* puntatore primo buffer libero*/
struct cab_data *mrd; /* puntatore must_recent_data */
};
 
static struct cab_desc cabs[MAX_CAB]; /* vettore descrittori dei CAB */
static CAB free_cab; /* indice del primo cab libero */
 
/* validate a CAB id: returns TRUE if the CAB is open, otherwise sets
   errno (ECAB_INVALID_ID / ECAB_CLOSED) and returns -1 */
static int checkcab(CAB id)
{
  if (id >= MAX_CAB) {
    errno = ECAB_INVALID_ID;
    return -1;
  }

  if (cabs[id].busy != TRUE) {
    errno = ECAB_CLOSED;
    return -1;
  }

  return TRUE;
}
 
/*----------------------------------------------------------------------*/
/* cab_init -- initialize the CAB data structures                       */
/*----------------------------------------------------------------------*/
void CABS_register_module(void)
{
  int i;

  /* chain every descriptor into the free list and mark it not busy;
     the list is terminated by NIL */
  free_cab = 0;
  for (i = 0; i < MAX_CAB - 1; i++) {
    cabs[i].next_cab_free = i + 1;
    cabs[i].busy = FALSE;
  }
  cabs[MAX_CAB-1].next_cab_free = NIL;
  cabs[MAX_CAB-1].busy = FALSE;
}
 
/*----------------------------------------------------------------------*/
/* cab_create -- create a CAB, initialize it and return its index;      */
/*               returns -1 (and sets errno) on error                   */
/*----------------------------------------------------------------------*/
CAB cab_create(char *name, int dim_mes, BYTE num_mes)
{
  CAB id;                /* index of the CAB to return             */
  struct cab_desc *pid;  /* pointer to the CAB (faster access)     */
  char *mem;             /* scratch pointer into the buffer memory */
  struct cab_data *tmp;  /* scan pointer over the buffer list      */
  int i;                 /* index variable                         */
  SYS_FLAGS f;

  f = kern_fsave();

  /* at least one buffer is needed */
  if (num_mes < 1) {
    errno = ECAB_INVALID_MSG_NUM;
    kern_frestore(f);
    return -1;
  }

  /* bug fix: the free list is terminated by NIL (see
     CABS_register_module / cab_delete), not by MAX_CAB; the old test
     "!= MAX_CAB" let an exhausted list fall through with id == NIL and
     indexed cabs[-1] */
  if ((id=free_cab) != NIL) {
    pid = &cabs[id];            /* take the address of the CAB */
    free_cab = pid->next_cab_free;
  }
  else {
    errno = ECAB_NO_MORE_ENTRY;
    kern_frestore(f);
    return -1;
  }

  /* allocate the buffer memory (num_mes buffers, each one header+message) */
  mem = kern_alloc((dim_mes + sizeof(struct cab_data)) * num_mes);

  kern_frestore(f);

  /* bug fix: the allocation result was never checked; on failure put
     the descriptor back into the free list and report the error */
  if (mem == NULL) {
    f = kern_fsave();
    pid->next_cab_free = free_cab;
    free_cab = id;
    kern_frestore(f);
    errno = ENOMEM;
    return -1;
  }

  /* initialize the CAB descriptor */
  strcpy(pid->name, name);
  pid->mem_cab = mem;
  pid->dim_mes = dim_mes;
  pid->n_buf = num_mes;

  /* initialize the first message and the free buffer list */
  pid->mrd = (struct cab_data *)mem;
  i = (int)num_mes;
  tmp = NULL;
  while (i--) {
    tmp = (struct cab_data *)mem;
    mem += sizeof(struct cab_data) + dim_mes;
    tmp->next = (struct cab_data *)mem;
    tmp->n_uso = 0;
  }

  tmp->next = NULL;
  pid->free = pid->mrd->next;

  /* zero the first message area; it is the initial most-recent-data */
  mem = (char *)(pid->mrd + 1);
  for (i=0; i<dim_mes; i++) *(mem++) = 0;
  pid->nfree = num_mes - 1;

  f = kern_fsave();
  pid->busy = TRUE;
  kern_frestore(f);

  return(id);
}
 
/*----------------------------------------------------------------------*/
/* cab_reserve -- reserve a buffer for writing a new message;           */
/*                returns a pointer to the message area, or NULL        */
/*                (with errno = ECAB_TOO_MUCH_MSG) if none is free      */
/*----------------------------------------------------------------------*/
char *cab_reserve(CAB id)
{
  struct cab_desc *pid;
  char *buf;
  SYS_FLAGS f;

  /* check the CAB identifier */
  if (checkcab(id) == -1) return(NULL);

  pid = &cabs[id];
  f = kern_fsave();

  /* bug fix: the old code post-decremented nfree even when it was 0,
     wrapping the unsigned BYTE counter to 255 and later handing out a
     buffer from an empty (NULL) free list; test before decrementing */
  if (pid->nfree == 0) {
    errno = ECAB_TOO_MUCH_MSG;
    kern_frestore(f);
    return(NULL);
  }

  pid->nfree--;
  /* the message area follows the cab_data header of the free buffer */
  buf = (char *)(pid->free + 1);
  pid->free = pid->free->next;
  kern_frestore(f);
  return(buf);
}
 
/*----------------------------------------------------------------------*/
/* cab_putmes -- publish a new message in the CAB: pbuf (obtained from  */
/*               cab_reserve) becomes the most-recent-data buffer       */
/*----------------------------------------------------------------------*/
int cab_putmes(CAB id, char *pbuf)
{
  struct cab_data *pold;
  struct cab_desc *pid;
  SYS_FLAGS f;

  if (checkcab(id) == -1) return -1;
  pid = &cabs[id];

  f = kern_fsave();
  /* if nobody is still reading the old most-recent buffer, recycle it
     into the free list */
  pold = pid->mrd;
  if (pold->n_uso == 0) {
    pold->next = pid->free;
    pid->free = pold;
    (pid->nfree)++;
  }

  /* pbuf points to the message area; its cab_data header sits right
     before it, so step back one header to get the buffer descriptor */
  pid->mrd = ((struct cab_data *)pbuf) - 1;
  kern_frestore(f);
  return 1;
}
 
/*----------------------------------------------------------------------*/
/* cab_getmes -- get the newest message in the CAB; returns a pointer   */
/*               to the most recent buffer (release it with cab_unget)  */
/*----------------------------------------------------------------------*/
char *cab_getmes(CAB id)
{
  char *msg;
  SYS_FLAGS f;

  if (checkcab(id) == -1)
    return(NULL);

  f = kern_fsave();

  /* mrd points to the most recently inserted buffer: account one more
     reader on it, then return the message area that follows its
     cab_data header */
  cabs[id].mrd->n_uso++;
  msg = (char *)(cabs[id].mrd + 1);

  kern_frestore(f);
  return(msg);
}
 
/*----------------------------------------------------------------------*/
/* cab_unget -- signal that the task no longer uses the message;        */
/*              if nobody else uses the buffer (and it is not the       */
/*              most-recent-data) it is put back into the free list     */
/*----------------------------------------------------------------------*/
int cab_unget(CAB id, char *pun_mes)
/* CAB id;           index of the CAB                 */
/* char *pun_mes;    pointer to the message area      */
{
  struct cab_data *pbuf;
  struct cab_desc *pid;
  SYS_FLAGS f;

  if (checkcab(id) == -1) return -1;
  pid = &cabs[id];

  f = kern_fsave();
  /* step back from the message area to its cab_data header */
  pbuf = ((struct cab_data *)(pun_mes)) - 1;

  /* release only when the last reader is done and the buffer is not
     the current most-recent-data */
  if ((--(pbuf->n_uso) == 0) && (pbuf != pid->mrd)) {
    pbuf->next = pid->free;
    pid->free = pbuf;
    (pid->nfree)++;
  }
  kern_frestore(f);
  return 1;
}
 
/*----------------------------------------------------------------------*/
/* cab_delete -- free the memory and release the cab                    */
/*----------------------------------------------------------------------*/
void cab_delete(CAB id)
{
  struct cab_desc *pid;
  SYS_FLAGS f;

  /* Validate the descriptor index as every other cab_* entry point
     (cab_putmes, cab_getmes, cab_unget) does; an invalid id would
     otherwise dereference a stale cabs[] entry and corrupt the
     free-descriptor list.
     NOTE(review): assumes checkcab() succeeds on a live (busy) cab,
     as it does for the other entry points — confirm. */
  if (checkcab(id) == -1) return;

  pid = &cabs[id];
  f = kern_fsave();

  /* Release the single allocation holding all the buffers:
     (message size + header size) * number of buffers. */
  kern_free(pid->mem_cab,(pid->dim_mes + sizeof(struct cab_data))*pid->n_buf);

  /* Mark the descriptor free and push it on the free-descriptor list. */
  pid->busy = FALSE;
  pid->next_cab_free = free_cab;
  free_cab = id;
  kern_frestore(f);
}
/shark/tags/rel_0_2/kernel/modules/srp.c
0,0 → 1,793
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: srp.c,v 1.2 2002-10-28 07:55:55 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:55 $
------------
 
Stack Resource Policy. see srp.h for general details...
 
 
HOW the shadows are managed in this module
------------------------------------------
 
All the task that use SRP are inserted in an ordered list, called tasklist.
 
when a task locks a mutex and changes the system ceiling, all the shadows
of the tasks with preemption level <= the new ceiling are set to the locking
task, and vice versa when the mutex is unlocked.
 
The real algorithm is slightly different: for example consider a task set
of 8 tasks. We represent each task here as (PID, shadow, preemption level).
 
There is also a field, current, used to scan the tasklist.
 
When the system starts, the situation is as follows:
 
system ceiling = 0, current = NIL
(a,a,1) (b,b,2) (c,c,2) (d,d,2) (e,e,3) (f,f,4) (g,g,4) (h,h,5)
 
for example, task a is scheduled, and lock a mutex that cause the system
ceiling to become 2. The situation will be the following:
 
system ceiling = 2, current = d
(a,a,1) (b,a,2) (c,a,2) (d,a,2) (e,e,3) (f,f,4) (g,g,4) (h,h,5)
 
Now suppose that task f preempts on task a. (no change to the shadows)
 
Then the task f locks a mutex and the system ceiling become 4. The shadows
will be set as follows:
 
system ceiling = 4, current = g
(a,f,1) (b,a,2) (c,a,2) (d,a,2) (e,f,3) (f,f,4) (g,f,4) (h,h,5)
 
The system maintains a stack of the locked mutexes. Each mutex has in its
descriptor the space for implementing a stack, useful in the unlock()
function to undo the modifications done with the last lock()...
 
This approach minimizes the number of shadows to be set, so minimizes
the complexity of the lock/unlock operations.
 
Unfortunately, it creates a tree in the shadows (i.e., when sys_ceiling=4,
task c points to task a that points to task f, and so on....). This may
cause performance a little worse with respect to a one-jump shadow set.
This is not a big problem because when a task is preempted it is very
difficult (if not impossible!) that it may be rescheduled before the end
of another high priority task.
 
Dynamic creation and termination of tasks
-----------------------------------------
This module allows dynamic creation and termination of tasks.
 
To be correct the system has to really activate the task only when the
system ceiling is 0.
 
To implement this there is a list, the lobbylist, that contains that tasks.
 
When a task is created and the system ceiling is > 0, the task is inserted
at the top of the list, and its activations are frozen via a call to
task_block_activations.
 
When the system ceiling returns to 0, the lobby list is purged and for each
task in that list task_unblock_activations is called. If the function
returns a number > 0, a call to task_activate is done on the task.
 
the tasks are inserted into the lobby list using only the next field.
 
 
 
When a mutex is destroyed or a task is created or killed, the ceiling
has to be recalculated. The recalculation is made when the system ceiling
goes down to 0. To know which mutexes need the operation, they are
inserted into the srp_recalc list.
 
 
The SRP_usemutex function (see srp.h) is used to declare the used mutexes
of a task. Why this and how it works?
In this way, a task can insert directly the list of the mutexes that it uses
without allocating others resource models, but using directly the mutexes
that MUST be (in any case) initialized before the task creation...
This is done in a simple way, inheriting the SRP_mutex_t from the RES_MODEL.
When a task registers a mutex, the SRP module receives the pointer to that
mutex, so it can do all the needed work with its data structures.
 
**/
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*