/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
/**
------------
CVS : $Id: rrvalue.c,v 1.1 2004-07-05 14:17:14 pj Exp $
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2004-07-05 14:17:14 $
------------
This file contains the scheduling module RRVALUE (Round Robin)
Read rrvalue.h for further details.
**/
/*
* Copyright (C) 2001 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include "rrvalue.h"
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
/*+ Status used in the level +*/
#define RRVALUE_READY MODULE_STATUS_BASE
#define RRVALUE_DELAY MODULE_STATUS_BASE+1
#define RRVALUE_IDLE MODULE_STATUS_BASE+2
/*+ the level redefinition for the Round Robin level +*/
typedef struct {
  level_des l;                  /*+ the standard level descriptor       +*/

  int nact[MAX_PROC];           /*+ number of pending activations       +*/

  QQUEUE ready;                 /*+ the ready queue                     +*/

  int slice;                    /*+ the level's time slice              +*/

  TIME period[MAX_PROC];        /*+ activation period                   +*/

  struct timespec reactivation_time[MAX_PROC];
                                /*+ the time at which the reactivation
                                    timer is posted                     +*/
  int reactivation_timer[MAX_PROC];
                                /*+ the reactivation timer              +*/
  BYTE periodic[MAX_PROC];

  struct multiboot_info *multiboot; /*+ used if the level has to insert
                                        the main task                   +*/

  BYTE models;                  /*+ Task Models that the Module can handle +*/
} RRVALUE_level_des;
static char *RRVALUE_status_to_a(WORD status)
{
  if (status < MODULE_STATUS_BASE)
    return status_to_a(status);

  switch (status) {
    case RRVALUE_READY: return "RRVALUE_Ready";
    case RRVALUE_DELAY: return "RRVALUE_Delay";
    case RRVALUE_IDLE : return "RRVALUE_Idle";
    default           : return "RRVALUE_Unknown";
  }
}
/* this is the periodic reactivation of the task... it is posted only
if the task is a periodic task */
static void RRVALUE_timer_reactivate(void *par)
{
  PID p = (PID) par;
  RRVALUE_level_des *lev;
  // kern_printf("react");

  lev = (RRVALUE_level_des *)level_table[proc_table[p].task_level];

  if (proc_table[p].status == RRVALUE_IDLE) {
    /* the task has finished the current activation and must be
       reactivated */
    proc_table[p].status = RRVALUE_READY;
    qq_insertlast(p, &lev->ready);

    event_need_reschedule();
  }
  else if (lev->nact[p] >= 0)
    /* the task has not completed the current activation, so we save
       the activation incrementing nact... */
    lev->nact[p]++;

  /* repost the event at the next period end... */
  ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
  lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
                                               RRVALUE_timer_reactivate,
                                               (void *)p);

  /* tracer stuff */
  // trc_logevent(TRC_INTACTIVATION,&p);
}
/*+ this function is called when a task finishes its delay +*/
static void RRVALUE_timer_delay(void *par)
{
  PID p = (PID) par;
  RRVALUE_level_des *lev;

  lev = (RRVALUE_level_des *)level_table[proc_table[p].task_level];

  proc_table[p].status = RRVALUE_READY;
  qq_insertlast(p, &lev->ready);

  proc_table[p].delay_timer = NIL;  /* Paranoia */

  // kern_printf(" DELAY TIMER %d ", p);

  event_need_reschedule();
}
static int RRVALUE_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
  RRVALUE_level_des *lev = (RRVALUE_level_des *)(level_table[l]);

  if ((m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l)) &&
      lev->models & RRVALUE_ONLY_NRT)
    return 0;
  else if ((m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l)) &&
           lev->models & RRVALUE_ONLY_SOFT)
    return 0;
  else if ((m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l)) &&
           lev->models & RRVALUE_ONLY_HARD)
    return 0;
  else if ((m->pclass == VALUE_PCLASS || m->pclass == (VALUE_PCLASS | l)) &&
           lev->models & RRVALUE_ONLY_VALUE)
    return 0;
  else
    return -1;
}
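
/* Note: "models" is tested as a bit mask above, so a level registered with,
   e.g., RRVALUE_ONLY_NRT | RRVALUE_ONLY_SOFT (flags declared in rrvalue.h;
   combining them this way is only an illustrative assumption) would accept
   both NRT and SOFT task models and reject the others. */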
static int RRVALUE_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
  return -1;
}
static void RRVALUE_level_status(LEVEL l)
{
  RRVALUE_level_des *lev = (RRVALUE_level_des *)(level_table[l]);
  PID p = qq_queryfirst(&lev->ready);

  kern_printf("Slice: %d \n", lev->slice);

  while (p != NIL) {
    kern_printf("Pid: %d\t Name: %20s Status: %s\n",
                p, proc_table[p].name,
                RRVALUE_status_to_a(proc_table[p].status));
    p = proc_table[p].next;
  }

  for (p = 0; p < MAX_PROC; p++)
    if (proc_table[p].task_level == l && proc_table[p].status != RRVALUE_READY
        && proc_table[p].status != FREE)
      kern_printf("Pid: %d\t Name: %20s Status: %s\n",
                  p, proc_table[p].name,
                  RRVALUE_status_to_a(proc_table[p].status));
}
/* This is not efficient, but very fair :-)
   All this stuff is needed because, if a task executes for a long time
   due to (shadow!) priority inheritance, it may have to go to the tail
   of the queue many times... */
static PID RRVALUE_level_scheduler(LEVEL l)
{
  RRVALUE_level_des *lev = (RRVALUE_level_des *)(level_table[l]);

  PID p;

  for (;;) {
    p = qq_queryfirst(&lev->ready);
    if (p == -1)
      return p;
    //{kern_printf("(s%d)",p); return p;}

//    kern_printf("(p=%d l=%d avail=%d wcet =%d)\n",p,l,proc_table[p].avail_time, proc_table[p].wcet);
    if (proc_table[p].avail_time <= 0) {
      proc_table[p].avail_time += proc_table[p].wcet;
      qq_extract(p, &lev->ready);
      qq_insertlast(p, &lev->ready);
    }
    else
      //{kern_printf("(s%d)",p); return p;}
      return p;
  }
}
static int RRVALUE_level_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  /* the RRVALUE level always guarantees... the function is defined because
     there can be an aperiodic server at a level with lower priority than
     RRVALUE that needs a guarantee (e.g., a TBS server) */
  return 1;
}
static int RRVALUE_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
  RRVALUE_level_des *lev = (RRVALUE_level_des *)(level_table[l]);

//  kern_printf("create %d mod %d\n",p,m->pclass);
  /* the task state is set to SLEEP by the generic task_create;
     the only things left to set are the capacity fields, which are set
     to the values passed in the model... */

  /* the wcet field is used because with wcet we can account for a task
     that consumes more than its timeslice... */

  if (lev->models & RRVALUE_ONLY_NRT &&
      (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l))) {
    NRT_TASK_MODEL *nrt = (NRT_TASK_MODEL *)m;

//    kern_printf("nrt");
    if (nrt->slice) {
      proc_table[p].avail_time = nrt->slice;
      proc_table[p].wcet       = nrt->slice;
    }
    else {
      proc_table[p].avail_time = lev->slice;
      proc_table[p].wcet       = lev->slice;
    }
    proc_table[p].control   |= CONTROL_CAP;

    if (nrt->arrivals == SAVE_ARRIVALS)
      lev->nact[p] = 0;
    else
      lev->nact[p] = -1;

    lev->periodic[p] = 0;
    lev->period[p] = 0;
  }
  else if (lev->models & RRVALUE_ONLY_SOFT &&
           (m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l))) {
    SOFT_TASK_MODEL *soft = (SOFT_TASK_MODEL *)m;
//    kern_printf("soft");

    proc_table[p].avail_time = lev->slice;
    proc_table[p].wcet       = lev->slice;
    proc_table[p].control   |= CONTROL_CAP;

    if (soft->arrivals == SAVE_ARRIVALS)
      lev->nact[p] = 0;
    else
      lev->nact[p] = -1;

    if (soft->periodicity == PERIODIC) {
      lev->periodic[p] = 1;
      lev->period[p] = soft->period;
    }
    else {
      lev->periodic[p] = 0;
      lev->period[p] = 0;
    }
  }
  else if (lev->models & RRVALUE_ONLY_HARD &&
           (m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l))) {
    HARD_TASK_MODEL *hard = (HARD_TASK_MODEL *)m;
//    kern_printf("hard");

    proc_table[p].avail_time = lev->slice;
    proc_table[p].wcet       = lev->slice;
    proc_table[p].control   |= CONTROL_CAP;

    lev->nact[p] = 0;

    if (hard->periodicity == PERIODIC) {
      lev->periodic[p] = 1;
      lev->period[p] = hard->mit;
    }
    else {
      lev->periodic[p] = 0;
      lev->period[p] = 0;
    }
  }
  else if (lev->models & RRVALUE_ONLY_VALUE &&
           (m->pclass == VALUE_PCLASS || m->pclass == (VALUE_PCLASS | l))) {
    VALUE_TASK_MODEL *val = (VALUE_TASK_MODEL *)m;  /* currently unused:
                              no VALUE-specific parameters are needed here */

    proc_table[p].avail_time = lev->slice;
    proc_table[p].wcet       = lev->slice;
    proc_table[p].control   |= CONTROL_CAP;

    lev->nact[p] = -1;
    lev->periodic[p] = 0;
    lev->period[p] = 0;
  }

  return 0; /* OK */
}
static void RRVALUE_task_detach(LEVEL l, PID p)
{
  /* the RRVALUE level doesn't introduce any new field in the TASK_MODEL,
     so all the detach work is done by the generic task_create.
     The task state is set to FREE by the generic task_create */
}

static int RRVALUE_task_eligible(LEVEL l, PID p)
{
  return 0; /* if the task p is chosen, it is always eligible */
}
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
static void RRVALUE_task_dispatch(LEVEL l, PID p, int nostop)
{
  RRVALUE_level_des *lev = (RRVALUE_level_des *)(level_table[l]);

  //static int p2count=0;

  /* the task state is set EXE by the scheduler();
     we extract the task from the ready queue
     NB: we can't assume that p is the first task in the queue!!! */
  qq_extract(p, &lev->ready);
}
static void RRVALUE_task_epilogue(LEVEL l, PID p)
{
  RRVALUE_level_des *lev = (RRVALUE_level_des *)(level_table[l]);

  /* check if the slice is finished and insert the task in the correct
     qqueue position */
  if (proc_table[p].avail_time <= 0) {
    proc_table[p].avail_time += proc_table[p].wcet;
    qq_insertlast(p, &lev->ready);
  }
  else
    /* avail_time is still > 0, so the running task has to run for the
       remaining usecs */
    qq_insertfirst(p, &lev->ready);

  proc_table[p].status = RRVALUE_READY;
}
static void RRVALUE_task_activate(LEVEL l, PID p)
{
  RRVALUE_level_des *lev = (RRVALUE_level_des *)(level_table[l]);

  /* Test if we are trying to activate a non sleeping task */
  /* save the activation (only if needed...) */
  if (proc_table[p].status != SLEEP && proc_table[p].status != RRVALUE_IDLE) {
    if (lev->nact[p] != -1)
      lev->nact[p]++;
    return;
  }

  ll_gettime(TIME_EXACT, &proc_table[p].request_time);

  /* Insert task in the correct position */
  proc_table[p].status = RRVALUE_READY;
  qq_insertlast(p, &lev->ready);

  /* Set the reactivation timer */
  if (lev->periodic[p])
  {
    TIMESPEC_ASSIGN(&lev->reactivation_time[p], &proc_table[p].request_time);
    ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
//    TIMESPEC_ASSIGN(&lev->reactivation_time[p], &lev->cbs_dline[p]);
    lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
                                                 RRVALUE_timer_reactivate,
                                                 (void *)p);
  }
}
static void RRVALUE_task_insert(LEVEL l, PID p)
{
  RRVALUE_level_des *lev = (RRVALUE_level_des *)(level_table[l]);

  /* Similar to RRVALUE_task_activate, but we don't check in what state
     the task is and we don't set the request_time */

  /* Insert task in the correct position */
  proc_table[p].status = RRVALUE_READY;
  qq_insertlast(p, &lev->ready);
}
static void RRVALUE_task_extract(LEVEL l, PID p)
{
  /* Extract the running task from the level
     . we have already extracted it from the ready queue at dispatch time.
     . the capacity event has to be removed by the generic kernel
     . the wcet doesn't need modification...
     . the state of the task is set by the calling function

     So, we do nothing!!!
  */
}
static void RRVALUE_task_endcycle(LEVEL l, PID p)
{
  RRVALUE_level_des *lev = (RRVALUE_level_des *)(level_table[l]);

  if (lev->nact[p] > 0) {
    /* continue!!!! */
    ll_gettime(TIME_EXACT, &proc_table[p].request_time);
    lev->nact[p]--;
//    qq_insertlast(p,&lev->ready);
    qq_insertfirst(p, &lev->ready);
    proc_table[p].status = RRVALUE_READY;
  }
  else
    proc_table[p].status = RRVALUE_IDLE;
}
static void RRVALUE_task_end(LEVEL l, PID p)
{
  RRVALUE_level_des *lev = (RRVALUE_level_des *)(level_table[l]);

  lev->nact[p] = -1;

  /* we delete the reactivation timer */
  if (lev->periodic[p]) {
    event_delete(lev->reactivation_timer[p]);
    lev->reactivation_timer[p] = -1;
  }

  /* then, we insert the task in the free queue */
  proc_table[p].status = FREE;
  q_insert(p, &freedesc);
}
static void RRVALUE_task_sleep(LEVEL l, PID p)
{
  RRVALUE_level_des *lev = (RRVALUE_level_des *)(level_table[l]);

  if (lev->nact[p] >= 0) lev->nact[p] = 0;

  /* we delete the reactivation timer */
  if (lev->periodic[p]) {
    event_delete(lev->reactivation_timer[p]);
    lev->reactivation_timer[p] = -1;
  }

  proc_table[p].status = SLEEP;
}
static void RRVALUE_task_delay(LEVEL l, PID p, TIME usdelay)
{
//  RRVALUE_level_des *lev = (RRVALUE_level_des *)(level_table[l]);
  struct timespec wakeuptime;

  /* equal to RRVALUE_task_endcycle */
  proc_table[p].status = RRVALUE_DELAY;

  /* we need to delete this event if we kill the task while it is sleeping */
  ll_gettime(TIME_EXACT, &wakeuptime);
  ADDUSEC2TIMESPEC(usdelay, &wakeuptime);
  proc_table[p].delay_timer = kern_event_post(&wakeuptime,
                                              RRVALUE_timer_delay,
                                              (void *)p);
}
static int RRVALUE_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; }

static void RRVALUE_guest_detach(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void RRVALUE_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void RRVALUE_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void RRVALUE_guest_activate(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void RRVALUE_guest_insert(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void RRVALUE_guest_extract(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void RRVALUE_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void RRVALUE_guest_end(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void RRVALUE_guest_sleep(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void RRVALUE_guest_delay(LEVEL l, PID p, DWORD tickdelay)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
/* Registration functions */

/*+ This init function installs the "main" task +*/
static void RRVALUE_call_main(void *l)
{
  LEVEL lev;
  PID p;
  NRT_TASK_MODEL m;
  void *mb;

  lev = (LEVEL)l;

  nrt_task_default_model(m);
  nrt_task_def_level(m,lev); /* with this we are sure that the task arrives
                                at the correct level */

  mb = ((RRVALUE_level_des *)level_table[lev])->multiboot;
  nrt_task_def_arg(m,mb);
  nrt_task_def_usemath(m);
  nrt_task_def_nokill(m);
  nrt_task_def_ctrl_jet(m);

  p = task_create("Main", __init__, (TASK_MODEL *)&m, NULL);

  if (p == NIL)
    printk("\nPanic!!! can't create main task...\n");

  RRVALUE_task_activate(lev,p);
}
/*+ Registration function:
    TIME slice                 the slice for the Round Robin queue
    int createmain             1 if the level creates the main task, 0 otherwise
    struct multiboot_info *mb  used if createmain is specified

    (a usage sketch is given at the end of this file) +*/
void RRVALUE_register_level(TIME slice,
                            int createmain,
                            struct multiboot_info *mb,
                            BYTE models)
{
  LEVEL l;                 /* the level that we register   */
  RRVALUE_level_des *lev;  /* for readability only         */
  PID i;

  printk("RRVALUE_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor();

  /* alloc the space needed for the RRVALUE_level_des */
  lev = (RRVALUE_level_des *)kern_alloc(sizeof(RRVALUE_level_des));

  printk(" lev=%d\n", (int)lev);

  /* update the level_table with the new entry */
  level_table[l] = (level_des *)lev;

  /* fill the standard descriptor */
  strncpy(lev->l.level_name, RRVALUE_LEVELNAME, MAX_LEVELNAME);
  lev->l.level_code               = RRVALUE_LEVEL_CODE;
  lev->l.level_version            = RRVALUE_LEVEL_VERSION;

  lev->l.level_accept_task_model  = RRVALUE_level_accept_task_model;
  lev->l.level_accept_guest_model = RRVALUE_level_accept_guest_model;
  lev->l.level_status             = RRVALUE_level_status;
  lev->l.level_scheduler          = RRVALUE_level_scheduler;
  lev->l.level_guarantee          = RRVALUE_level_guarantee;

  lev->l.task_create              = RRVALUE_task_create;
  lev->l.task_detach              = RRVALUE_task_detach;
  lev->l.task_eligible            = RRVALUE_task_eligible;
  lev->l.task_dispatch            = RRVALUE_task_dispatch;
  lev->l.task_epilogue            = RRVALUE_task_epilogue;
  lev->l.task_activate            = RRVALUE_task_activate;
  lev->l.task_insert              = RRVALUE_task_insert;
  lev->l.task_extract             = RRVALUE_task_extract;
  lev->l.task_endcycle            = RRVALUE_task_endcycle;
  lev->l.task_end                 = RRVALUE_task_end;
  lev->l.task_sleep               = RRVALUE_task_sleep;
  lev->l.task_delay               = RRVALUE_task_delay;

  lev->l.guest_create             = RRVALUE_guest_create;
  lev->l.guest_detach             = RRVALUE_guest_detach;
  lev->l.guest_dispatch           = RRVALUE_guest_dispatch;
  lev->l.guest_epilogue           = RRVALUE_guest_epilogue;
  lev->l.guest_activate           = RRVALUE_guest_activate;
  lev->l.guest_insert             = RRVALUE_guest_insert;
  lev->l.guest_extract            = RRVALUE_guest_extract;
  lev->l.guest_endcycle           = RRVALUE_guest_endcycle;
  lev->l.guest_end                = RRVALUE_guest_end;
  lev->l.guest_sleep              = RRVALUE_guest_sleep;
  lev->l.guest_delay              = RRVALUE_guest_delay;

  /* fill the RRVALUE descriptor part */
  for (i = 0; i < MAX_PROC; i++) {
    lev->nact[i] = -1;
    NULL_TIMESPEC(&lev->reactivation_time[i]);
    lev->reactivation_timer[i] = -1;
    lev->periodic[i] = 0;
    lev->period[i] = 0;
  }

  qq_init(&lev->ready);

  if (slice < RRVALUE_MINIMUM_SLICE) slice = RRVALUE_MINIMUM_SLICE;
  if (slice > RRVALUE_MAXIMUM_SLICE) slice = RRVALUE_MAXIMUM_SLICE;
  lev->slice     = slice;

  lev->multiboot = mb;
  lev->models    = models;

  if (createmain)
    sys_atrunlevel(RRVALUE_call_main, (void *) l, RUNLEVEL_INIT);
}
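
/*
 * Usage sketch (illustrative only, not part of this module): an application
 * init file might register this level roughly as shown below.  The entry
 * point name __kernel_register_levels__, the 10000-usec slice, the choice
 * of RRVALUE_ONLY_NRT and the returned tick value are assumptions about the
 * surrounding S.Ha.R.K. configuration, not something defined in this file.
 *
 *   TIME __kernel_register_levels__(void *arg)
 *   {
 *     struct multiboot_info *mb = (struct multiboot_info *)arg;
 *
 *     // 10 ms slice, let the level create the "main" task,
 *     // accept only NRT task models
 *     RRVALUE_register_level(10000, 1, mb, RRVALUE_ONLY_NRT);
 *
 *     return 300;  // system tick in usec (assumed value)
 *   }
 */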