/shark/tags/rel_0_4/kernel/modules/edf2.c |
---|
File deleted |
/shark/tags/rel_0_4/kernel/modules/old/trace.c |
---|
File deleted |
/shark/tags/rel_0_4/kernel/modules/makefile |
---|
3,6 → 3,7 |
ifndef BASE |
BASE=../.. |
endif |
include $(BASE)/config/config.mk |
LIBRARY = mod |
42,10 → 43,10 |
TRC_OBJ = trace.o \ |
trcdummy.o \ |
trcfixed.o \ |
trccirc.o |
trccirc.o \ |
trcdfix.o \ |
trcudp.o |
# trcudp.o |
OBJS = $(SCHED_OBJ) $(APER_OBJ) $(RES_OBJ) $(TRC_OBJ) |
include $(BASE)/config/lib.mk |
/shark/tags/rel_0_4/kernel/modules/edf.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: edf.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: edf.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the scheduling module EDF (Earliest Deadline First) |
34,7 → 34,7 |
**/ |
/* |
* Copyright (C) 2000 Paolo Gai |
* Copyright (C) 2000,2002 Paolo Gai |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
62,12 → 62,11 |
#include <kernel/func.h> |
#include <kernel/trace.h> |
//#define edf_printf kern_printf |
#define edf_printf printk |
//#define EDFDEBUG |
#define edf_printf kern_printf |
/*+ Status used in the level +*/ |
#define EDF_READY MODULE_STATUS_BASE /*+ - Ready status +*/ |
#define EDF_DELAY MODULE_STATUS_BASE+1 /*+ - Delay status +*/ |
#define EDF_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/ |
#define EDF_WAIT MODULE_STATUS_BASE+3 /*+ to wait the deadline +*/ |
#define EDF_IDLE MODULE_STATUS_BASE+4 /*+ to wait the deadline +*/ |
90,7 → 89,7 |
/*+ used to manage the JOB_TASK_MODEL and the |
periodicity +*/ |
QUEUE ready; /*+ the ready queue +*/ |
IQUEUE ready; /*+ the ready queue +*/ |
int flags; /*+ the init flags... +*/ |
99,28 → 98,15 |
} EDF_level_des; |
static char *EDF_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case EDF_READY : return "EDF_Ready"; |
case EDF_DELAY : return "EDF_Delay"; |
case EDF_WCET_VIOLATED: return "EDF_Wcet_Violated"; |
case EDF_WAIT : return "EDF_Sporadic_Wait"; |
case EDF_IDLE : return "EDF_Idle"; |
case EDF_ZOMBIE : return "EDF_Zombie"; |
default : return "EDF_Unknown"; |
} |
} |
static void EDF_timer_deadline(void *par) |
{ |
PID p = (PID) par; |
EDF_level_des *lev; |
struct timespec *temp; |
#ifdef EDFDEBUG |
edf_printf("$"); |
#endif |
lev = (EDF_level_des *)level_table[proc_table[p].task_level]; |
128,7 → 114,7 |
case EDF_ZOMBIE: |
/* we finally put the task in the free queue */ |
proc_table[p].status = FREE; |
q_insertfirst(p,&freedesc); |
iq_insertfirst(p,&freedesc); |
/* and free the allocated bandwidth */ |
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet; |
break; |
137,18 → 123,17 |
/* tracer stuff */ |
trc_logevent(TRC_INTACTIVATION,&p); |
/* similar to EDF_task_activate */ |
TIMESPEC_ASSIGN(&proc_table[p].request_time, |
&proc_table[p].timespec_priority); |
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority); |
temp = iq_query_timespec(p,&lev->ready); |
ADDUSEC2TIMESPEC(lev->period[p], temp); |
proc_table[p].status = EDF_READY; |
q_timespec_insert(p,&lev->ready); |
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority, |
iq_timespec_insert(p,&lev->ready); |
lev->deadline_timer[p] = kern_event_post(temp, |
EDF_timer_deadline, |
(void *)p); |
edf_printf("(dline p%d ev%d %d.%d)",(int)p,(int)lev->deadline_timer[p],(int)proc_table[p].timespec_priority.tv_sec,(int)proc_table[p].timespec_priority.tv_nsec/1000); |
//printk("(d%d idle priority set to %d)",p,proc_table[p].priority ); |
#ifdef EDFDEBUG |
edf_printf("(dline p%d ev%d %d.%d)",(int)p,(int)lev->deadline_timer[p],(int)temp->tv_sec,(int)temp->tv_nsec/1000); |
#endif |
event_need_reschedule(); |
printk("el%d|",p); |
break; |
case EDF_WAIT: |
158,8 → 143,10 |
default: |
/* else, a deadline miss occurred!!! */ |
#ifdef EDFDEBUG |
edf_printf("\nstatus %d\n", (int)proc_table[p].status); |
edf_printf("timer_deadline:AAARRRGGGHHH!!!"); |
#endif |
kern_raise(XDEADLINE_MISS,p); |
} |
} |
168,116 → 155,26 |
{ |
PID p = (PID) par; |
#ifdef EDFDEBUG |
edf_printf("AAARRRGGGHHH!!!"); |
#endif |
kern_raise(XDEADLINE_MISS,p); |
} |
/*+ this function is called when a task finishes its delay +*/ |
static void EDF_timer_delay(void *par) |
/* The scheduler only gets the first task in the queue */ |
static PID EDF_public_scheduler(LEVEL l) |
{ |
PID p = (PID) par; |
EDF_level_des *lev; |
lev = (EDF_level_des *)level_table[proc_table[p].task_level]; |
proc_table[p].status = EDF_READY; |
q_timespec_insert(p,&lev->ready); |
proc_table[p].delay_timer = NIL; /* Paranoia */ |
event_need_reschedule(); |
} |
static int EDF_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l)) { |
HARD_TASK_MODEL *h = (HARD_TASK_MODEL *)m; |
if (h->wcet && h->mit) |
return 0; |
} |
return -1; |
} |
static int EDF_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == JOB_PCLASS || m->pclass == (JOB_PCLASS | l)) |
return 0; |
else |
return -1; |
} |
static char *onoff(int i) |
{ |
if (i) |
return "On "; |
else |
return "Off"; |
} |
static void EDF_level_status(LEVEL l) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
PID p = lev->ready; |
kern_printf("Wcet Check : %s\n", |
onoff(lev->flags & EDF_ENABLE_WCET_CHECK)); |
kern_printf("On-line guarantee : %s\n", |
onoff(lev->flags & EDF_ENABLE_GUARANTEE)); |
kern_printf("Used Bandwidth : %u/%u\n", |
lev->U, MAX_BANDWIDTH); |
#ifdef EDFDEBUG |
edf_printf("(s%d)", iq_query_first(&lev->ready)); |
#endif |
while (p != NIL) { |
if ((proc_table[p].pclass) == JOB_PCLASS) |
kern_printf("Pid: %2d (GUEST)\n", p); |
else |
kern_printf("Pid: %2d Name: %10s %s: %9ld Dline: %9ld.%6ld Stat: %s\n", |
p, |
proc_table[p].name, |
lev->flag[p] & EDF_FLAG_SPORADIC ? "MinITime" : "Period ", |
lev->period[p], |
proc_table[p].timespec_priority.tv_sec, |
proc_table[p].timespec_priority.tv_nsec/1000, |
EDF_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
} |
for (p=0; p<MAX_PROC; p++) |
if (proc_table[p].task_level == l && proc_table[p].status != EDF_READY |
&& proc_table[p].status != FREE ) |
kern_printf("Pid: %2d Name: %10s %s: %9ld Dline: %9ld.%6ld Stat: %s\n", |
p, |
proc_table[p].name, |
lev->flag[p] & EDF_FLAG_SPORADIC ? "MinITime" : "Period ", |
lev->period[p], |
proc_table[p].timespec_priority.tv_sec, |
proc_table[p].timespec_priority.tv_nsec/1000, |
EDF_status_to_a(proc_table[p].status)); |
return iq_query_first(&lev->ready); |
} |
/* The scheduler only gets the first task in the queue */ |
static PID EDF_level_scheduler(LEVEL l) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
/* { // print 4 dbg the ready queue |
PID p= lev->ready; |
kern_printf("(s"); |
while (p != NIL) { |
kern_printf("%d ",p); |
p = proc_table[p].next; |
} |
kern_printf(") "); |
} |
*/ |
return (PID)lev->ready; |
} |
/* The on-line guarantee is enabled only if the appropriate flag is set... */ |
static int EDF_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int EDF_public_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
295,14 → 192,20 |
} |
static int EDF_task_create(LEVEL l, PID p, TASK_MODEL *m) |
static int EDF_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
HARD_TASK_MODEL *h; |
/* if the EDF_task_create is called, then the pclass must be a |
valid pclass. */ |
if (m->pclass != HARD_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
h = (HARD_TASK_MODEL *)m; |
if (!h->wcet || !h->mit) return -1; |
/* now we know that m is a valid model */ |
HARD_TASK_MODEL *h = (HARD_TASK_MODEL *)m; |
#ifdef EDFDEBUG |
edf_printf("(cr%d)", p); |
#endif |
lev->period[p] = h->mit; |
346,7 → 249,7 |
return 0; /* OK, also if the task cannot be guaranteed... */ |
} |
static void EDF_task_detach(LEVEL l, PID p) |
static void EDF_public_detach(LEVEL l, PID p) |
{ |
/* the EDF level doesn't introduce any dynamically allocated new field. |
we only have to reset the NO_GUARANTEE FIELD and decrement the allocated |
354,6 → 257,10 |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
#ifdef EDFDEBUG |
edf_printf("(det%d)", p); |
#endif |
if (lev->flags & EDF_FAILED_GUARANTEE) |
lev->flags &= ~EDF_FAILED_GUARANTEE; |
else |
360,46 → 267,27 |
lev->U -= (MAX_BANDWIDTH / lev->period[p]) * proc_table[p].wcet; |
} |
static int EDF_task_eligible(LEVEL l, PID p) |
static void EDF_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void EDF_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
#ifdef EDFDEBUG |
edf_printf("(disp p%d %d.%d)",(int)p,(int)schedule_time.tv_sec,(int)schedule_time.tv_nsec/1000); |
#endif |
/* the task state is set to EXE by the scheduler() |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
q_extract(p, &lev->ready); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
iq_extract(p, &lev->ready); |
} |
static void EDF_task_epilogue(LEVEL l, PID p) |
static void EDF_public_epilogue(LEVEL l, PID p) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
#ifdef EDFDEBUG |
edf_printf("(epil p%d %d.%d)",p,(int)schedule_time.tv_sec,(int)schedule_time.tv_nsec/1000); |
#endif |
/* check if the wcet is finished... */ |
if ((lev->flags & EDF_ENABLE_WCET_CHECK) && proc_table[p].avail_time <= 0) { |
409,15 → 297,20 |
} |
else { |
/* the task has been preempted. it returns into the ready queue... */ |
q_timespec_insert(p,&lev->ready); |
iq_timespec_insert(p,&lev->ready); |
proc_table[p].status = EDF_READY; |
} |
} |
static void EDF_task_activate(LEVEL l, PID p) |
static void EDF_public_activate(LEVEL l, PID p) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
struct timespec *temp; |
#ifdef EDFDEBUG |
edf_printf("(act%d)", p); |
#endif |
if (proc_table[p].status == EDF_WAIT) { |
kern_raise(XACTIVATION,p); |
return; |
431,36 → 324,36 |
/* see also EDF_timer_deadline */ |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
temp = iq_query_timespec(p, &lev->ready); |
kern_gettime(temp); |
ADDUSEC2TIMESPEC(lev->period[p], temp); |
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, |
&proc_table[p].request_time); |
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority); |
/* Insert task in the correct position */ |
proc_table[p].status = EDF_READY; |
q_timespec_insert(p,&lev->ready); |
iq_timespec_insert(p,&lev->ready); |
/* Set the deadline timer */ |
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority, |
lev->deadline_timer[p] = kern_event_post(temp, |
EDF_timer_deadline, |
(void *)p); |
edf_printf("(dline p%d ev%d %d.%d)",p,(int)lev->deadline_timer[p],(int)proc_table[p].timespec_priority.tv_sec,(int)proc_table[p].timespec_priority.tv_nsec/1000); |
#ifdef EDFDEBUG |
edf_printf("(dline p%d ev%d %d.%d)",p,(int)lev->deadline_timer[p],(int)temp->tv_sec,(int)temp->tv_nsec/1000); |
#endif |
} |
static void EDF_task_insert(LEVEL l, PID p) |
static void EDF_public_unblock(LEVEL l, PID p) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
/* Similar to EDF_task_activate, but we don't check in what state |
the task is and we don't set the request_time */ |
/* Similar to EDF_task_activate, |
but we don't check in what state the task is */ |
/* Insert task in the correct position */ |
proc_table[p].status = EDF_READY; |
q_timespec_insert(p,&lev->ready); |
iq_timespec_insert(p,&lev->ready); |
} |
static void EDF_task_extract(LEVEL l, PID p) |
static void EDF_public_block(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extracted it from the ready queue at dispatch time. |
473,30 → 366,35 |
*/ |
} |
static void EDF_task_endcycle(LEVEL l, PID p) |
static int EDF_public_message(LEVEL l, PID p, void *m) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
#ifdef EDFDEBUG |
edf_printf("(ecyc p%d %d.%d)",p,(int)schedule_time.tv_sec,(int)schedule_time.tv_nsec/1000); |
#endif |
/* the task has terminated its job before consuming the wcet. All OK! */ |
if (lev->flag[p] & EDF_FLAG_SPORADIC) |
if (!(lev->flag[p] & EDF_FLAG_SPORADIC)) |
proc_table[p].status = EDF_IDLE; |
else |
proc_table[p].status = EDF_WAIT; |
else /* pclass = sporadic_pclass */ |
proc_table[p].status = EDF_IDLE; |
/* we reset the capacity counters... */ |
if (lev->flags & EDF_ENABLE_WCET_CHECK) |
proc_table[p].avail_time = proc_table[p].wcet; |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
/* when the deadline timer fires, it recognizes the situation and sets |
everything up correctly (reactivation, request_time, etc.) */ |
everything up correctly (reactivation, sleep, etc.) */ |
return 0; |
} |
static void EDF_task_end(LEVEL l, PID p) |
static void EDF_public_end(LEVEL l, PID p) |
{ |
// EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
proc_table[p].status = EDF_ZOMBIE; |
/* When the deadline timer fires, it puts the task descriptor in |
503,182 → 401,81 |
the free queue and frees the allocated bandwidth... */ |
} |
static void EDF_task_sleep(LEVEL l, PID p) |
static void EDF_private_insert(LEVEL l, PID p, TASK_MODEL *m) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
JOB_TASK_MODEL *job; |
/* the task has terminated its job before consuming the wcet. All OK! */ |
proc_table[p].status = EDF_WAIT; |
if (m->pclass != JOB_PCLASS || (m->level != 0 && m->level != l) ) { |
kern_raise(XINVALID_TASK, p); |
return; |
} |
/* we reset the capacity counters... */ |
if (lev->flags & EDF_ENABLE_WCET_CHECK) |
proc_table[p].avail_time = proc_table[p].wcet; |
job = (JOB_TASK_MODEL *)m; |
/* when the deadline timer fires, it recognizes the situation and |
correctly sets the task state to sleep... */ |
} |
static void EDF_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
struct timespec wakeuptime; |
// EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
/* equal to EDF_task_endcycle */ |
proc_table[p].status = EDF_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT, &wakeuptime); |
ADDUSEC2TIMESPEC(usdelay, &wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
EDF_timer_delay, |
(void *)p); |
} |
/* Guest Functions |
These functions manage a JOB_TASK_MODEL, which is used to put |
a guest task in the EDF ready queue. */ |
static int EDF_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
JOB_TASK_MODEL *job = (JOB_TASK_MODEL *)m; |
/* if the EDF_guest_create is called, then the pclass must be a |
valid pclass. */ |
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, &job->deadline); |
/* Insert task in the correct position */ |
*iq_query_timespec(p, &lev->ready) = job->deadline; |
iq_timespec_insert(p,&lev->ready); |
proc_table[p].status = EDF_READY; |
lev->deadline_timer[p] = -1; |
if (job->noraiseexc) |
lev->period[p] = job->period; |
/* Set the deadline timer */ |
if (!(job->noraiseexc)) |
lev->flag[p] = EDF_FLAG_NORAISEEXC; |
else |
else { |
lev->flag[p] = 0; |
lev->period[p] = job->period; |
/* there is no bandwidth guarantee at this level, it is performed |
by the level that inserts guest tasks... */ |
return 0; /* OK, also if the task cannot be guaranteed... */ |
lev->deadline_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready), |
EDF_timer_guest_deadline, |
(void *)p); |
} |
} |
static void EDF_guest_detach(LEVEL l, PID p) |
static void EDF_private_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the EDF level doesn't introduce any dynamically allocated new field. |
No guarantee is performed on guest tasks... so we don't have to reset |
the NO_GUARANTEE FIELD */ |
} |
static void EDF_guest_dispatch(LEVEL l, PID p, int nostop) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
/* the task state is set to EXE by the scheduler() |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
q_extract(p, &lev->ready); |
iq_extract(p, &lev->ready); |
} |
static void EDF_guest_epilogue(LEVEL l, PID p) |
static void EDF_private_epilogue(LEVEL l, PID p) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
/* the task has been preempted. it returns into the ready queue... */ |
q_timespec_insert(p,&lev->ready); |
iq_timespec_insert(p,&lev->ready); |
proc_table[p].status = EDF_READY; |
} |
static void EDF_guest_activate(LEVEL l, PID p) |
static void EDF_private_extract(LEVEL l, PID p) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
/* Insert task in the correct position */ |
q_timespec_insert(p,&lev->ready); |
proc_table[p].status = EDF_READY; |
/* Set the deadline timer */ |
if (!(lev->flag[p] & EDF_FLAG_NORAISEEXC)) |
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority, |
EDF_timer_guest_deadline, |
(void *)p); |
} |
static void EDF_guest_insert(LEVEL l, PID p) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
/* Insert task in the correct position */ |
q_timespec_insert(p,&lev->ready); |
proc_table[p].status = EDF_READY; |
} |
static void EDF_guest_extract(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extracted it from the ready queue at dispatch time. |
. the state of the task is set by the calling function |
. the deadline must remain... |
So, we do nothing!!! |
*/ |
} |
static void EDF_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void EDF_guest_end(LEVEL l, PID p) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
//kern_printf("EDF_guest_end: dline timer %d\n",lev->deadline_timer[p]); |
#ifdef EDFDEBUG |
edf_printf("EDF_guest_end: dline timer %d\n",lev->deadline_timer[p]); |
#endif |
if (proc_table[p].status == EDF_READY) |
{ |
q_extract(p, &lev->ready); |
//kern_printf("(g_end rdy extr)"); |
} |
else if (proc_table[p].status == EDF_DELAY) { |
event_delete(proc_table[p].delay_timer); |
proc_table[p].delay_timer = NIL; /* paranoia */ |
} |
iq_extract(p, &lev->ready); |
/* we remove the deadline timer, because the slice is finished */ |
if (lev->deadline_timer[p] != NIL) { |
// kern_printf("EDF_guest_end: dline timer %d\n",lev->deadline_timer[p]); |
event_delete(lev->deadline_timer[p]); |
kern_event_delete(lev->deadline_timer[p]); |
lev->deadline_timer[p] = NIL; |
} |
} |
static void EDF_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void EDF_guest_delay(LEVEL l, PID p, TIME usdelay) |
{ |
struct timespec wakeuptime; |
// EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
/* equal to EDF_task_endcycle */ |
proc_table[p].status = EDF_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT, &wakeuptime); |
ADDUSEC2TIMESPEC(usdelay, &wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
EDF_timer_delay, |
(void *)p); |
} |
/* Registration functions */ |
/*+ Registration function: |
int flags the init flags ... see edf.h +*/ |
void EDF_register_level(int flags) |
LEVEL EDF_register_level(int flags) |
{ |
LEVEL l; /* the level that we register */ |
EDF_level_des *lev; /* for readability only */ |
687,58 → 484,34 |
printk("EDF_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(EDF_level_des)); |
printk(" alloco descrittore %d %d\n",l,(int)sizeof(EDF_level_des)); |
lev = (EDF_level_des *)level_table[l]; |
/* alloc the space needed for the EDF_level_des */ |
lev = (EDF_level_des *)kern_alloc(sizeof(EDF_level_des)); |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, EDF_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = EDF_LEVEL_CODE; |
lev->l.level_version = EDF_LEVEL_VERSION; |
lev->l.private_insert = EDF_private_insert; |
lev->l.private_extract = EDF_private_extract; |
lev->l.private_dispatch = EDF_private_dispatch; |
lev->l.private_epilogue = EDF_private_epilogue; |
lev->l.level_accept_task_model = EDF_level_accept_task_model; |
lev->l.level_accept_guest_model = EDF_level_accept_guest_model; |
lev->l.level_status = EDF_level_status; |
lev->l.level_scheduler = EDF_level_scheduler; |
lev->l.public_scheduler = EDF_public_scheduler; |
if (flags & EDF_ENABLE_GUARANTEE) |
lev->l.level_guarantee = EDF_level_guarantee; |
lev->l.public_guarantee = EDF_public_guarantee; |
else |
lev->l.level_guarantee = NULL; |
lev->l.public_guarantee = NULL; |
lev->l.task_create = EDF_task_create; |
lev->l.task_detach = EDF_task_detach; |
lev->l.task_eligible = EDF_task_eligible; |
lev->l.task_dispatch = EDF_task_dispatch; |
lev->l.task_epilogue = EDF_task_epilogue; |
lev->l.task_activate = EDF_task_activate; |
lev->l.task_insert = EDF_task_insert; |
lev->l.task_extract = EDF_task_extract; |
lev->l.task_endcycle = EDF_task_endcycle; |
lev->l.task_end = EDF_task_end; |
lev->l.task_sleep = EDF_task_sleep; |
lev->l.task_delay = EDF_task_delay; |
lev->l.public_create = EDF_public_create; |
lev->l.public_detach = EDF_public_detach; |
lev->l.public_end = EDF_public_end; |
lev->l.public_dispatch = EDF_public_dispatch; |
lev->l.public_epilogue = EDF_public_epilogue; |
lev->l.public_activate = EDF_public_activate; |
lev->l.public_unblock = EDF_public_unblock; |
lev->l.public_block = EDF_public_block; |
lev->l.public_message = EDF_public_message; |
lev->l.guest_create = EDF_guest_create; |
lev->l.guest_detach = EDF_guest_detach; |
lev->l.guest_dispatch = EDF_guest_dispatch; |
lev->l.guest_epilogue = EDF_guest_epilogue; |
lev->l.guest_activate = EDF_guest_activate; |
lev->l.guest_insert = EDF_guest_insert; |
lev->l.guest_extract = EDF_guest_extract; |
lev->l.guest_endcycle = EDF_guest_endcycle; |
lev->l.guest_end = EDF_guest_end; |
lev->l.guest_sleep = EDF_guest_sleep; |
lev->l.guest_delay = EDF_guest_delay; |
/* fill the EDF descriptor part */ |
for(i=0; i<MAX_PROC; i++) { |
lev->period[i] = 0; |
746,18 → 519,17 |
lev->flag[i] = 0; |
} |
lev->ready = NIL; |
iq_init(&lev->ready, &freedesc, 0); |
lev->flags = flags & 0x07; |
lev->U = 0; |
return l; |
} |
bandwidth_t EDF_usedbandwidth(LEVEL l) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
if (lev->l.level_code == EDF_LEVEL_CODE && |
lev->l.level_version == EDF_LEVEL_VERSION) |
return lev->U; |
else |
return 0; |
return lev->U; |
} |
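A large part of the edf.c churn above is the migration from the old QUEUE/q_* primitives to IQUEUE/iq_*: the ordering key (the absolute deadline) now lives inside the queue and is reached through iq_query_timespec(), instead of sitting in proc_table[p].timespec_priority. Below is a self-contained toy of that ordered-insert idea; it borrows nothing from S.Ha.R.K. itself, and the iq_insert name here is ours, not the kernel's:

```c
#include <stdio.h>
#include <time.h>

#define MAX_PROC 8
#define NIL      (-1)

static int             next_i[MAX_PROC];  /* index-linked ready list */
static struct timespec key[MAX_PROC];     /* per-task deadline key   */

static int ts_lt(const struct timespec *a, const struct timespec *b)
{
  return a->tv_sec < b->tv_sec ||
        (a->tv_sec == b->tv_sec && a->tv_nsec < b->tv_nsec);
}

/* ordered insert: keeps the head at the earliest deadline, which is all
   an EDF scheduler needs to pick the next task in O(1) */
static void iq_insert(int p, int *head)
{
  int *q = head;
  while (*q != NIL && ts_lt(&key[*q], &key[p]))
    q = &next_i[*q];
  next_i[p] = *q;
  *q = p;
}

int main(void)
{
  int head = NIL;
  key[3].tv_sec = 5;
  key[1].tv_sec = 2;
  key[4].tv_sec = 9;
  iq_insert(3, &head);
  iq_insert(1, &head);
  iq_insert(4, &head);
  for (int p = head; p != NIL; p = next_i[p])  /* prints tasks 1, 3, 4 */
    printf("task %d, deadline %lds\n", p, (long)key[p].tv_sec);
  return 0;
}
```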
/shark/tags/rel_0_4/kernel/modules/trace.c |
---|
38,11 → 38,11 |
*/ |
/* |
* CVS : $Id: trace.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
* CVS : $Id: trace.c,v 1.3 2003-01-07 17:07:51 pj Exp $ |
* |
* File: $File$ |
* Revision: $Revision: 1.1.1.1 $ |
* Last update: $Date: 2002-03-29 14:12:52 $ |
* Revision: $Revision: 1.3 $ |
* Last update: $Date: 2003-01-07 17:07:51 $ |
*/ |
#include <ll/sys/types.h> |
58,6 → 58,7 |
#include <bits/limits.h> |
/* maximum number of different queues where we want to log our events */ |
#define TRC_MAXQUEUES 5 |
/* |
64,8 → 65,11 |
* |
*/ |
/* this is the base path that is used as a prefix for all the |
filenames that are passed to the tracer */ |
static char basepath[PATH_MAX]; |
/* used to create the name for a tracer file */ |
void trc_create_name(char *basename, int uniq, char *pathname) |
{ |
if (uniq) sprintf(pathname,"%s/%s%i",basepath,basename,uniq); |
76,27 → 80,42 |
* |
*/ |
/* the flag used to discriminate whether an event has to be traced or not */ |
#define FLAG_NOTRACE 0x01 |
typedef struct TAGtrc_evtinfo_t { |
trc_queue_t *queue; |
unsigned flags; |
trc_queue_t *queue; /* the queue responsible for the logging of an event */ |
unsigned flags; /* if = FLAG_NOTRACE the event must not be logged */ |
} trc_evtinfo_t; |
/* -- */ |
/* one entry for each event; this array gives, for each event, the queue |
to use and whether it must be logged */ |
trc_evtinfo_t eventstable[TRC_NUMEVENTS]; |
/* For each kind of queue (see include/tracer/queues.h) there is a set of |
pointers to the functions that a queue should implement */ |
int (*createqueue[TRC_QUEUETYPESNUMBER])(trc_queue_t *, void *); |
int (*activatequeue[TRC_QUEUETYPESNUMBER])(void *,int); |
int (*terminatequeue[TRC_QUEUETYPESNUMBER])(void *); |
/* for each queue registered in the system, |
the functions used to get/post an event. |
The elements of this table are initialized with calls to createqueue[type]() |
(see include/trace/queues.h) */ |
trc_queue_t queuetable[TRC_MAXQUEUES]; |
/* initialized as a dummy queue, the default value of all the queues */ |
trc_queue_t queuesink; |
/* number of registered queues in the system */ |
int numqueues; |
/* -- */ |
/* The Dummy queue */ |
static trc_event_t *dummy_get(void *foo) |
{ |
return NULL; |
127,6 → 146,8 |
/* -- */ |
/* this function simply registers the functions that are used to |
handle a queue */ |
int trc_register_queuetype(int queuetype, |
int(*creat)(trc_queue_t *, void *), |
int(*activate)(void *,int), |
139,6 → 160,11 |
return 0; |
} |
/* this function registers a queue in the system. |
It uses the type to access the queue-handling functions registered |
with the previous function (trc_register_queuetype); |
numqueues is incremented! |
*/ |
int trc_create_queue(int queuetype, void *args) |
{ |
int res; |
186,20 → 212,28 |
printk(KERN_INFO "initializing tracer..."); |
/* all the queues are initialized to the dummy queue (sink!) */ |
for (i=0;i<TRC_QUEUETYPESNUMBER;i++) { |
createqueue[i]=dummy_createqueue; |
terminatequeue[i]=dummy_terminatequeue; |
} |
/* the sink queue is initialized */ |
dummy_createqueue(&queuesink,NULL); |
/* no queues registered yet */ |
numqueues=0; |
/* all the events are initially directed to the sink queue */ |
for (i=0;i<TRC_NUMEVENTS;i++) { |
eventstable[i].queue=&queuesink; |
eventstable[i].flags=FLAG_NOTRACE; |
} |
/* this will end the tracer at shutdown */ |
i=sys_atrunlevel(trc_end,NULL,RUNLEVEL_SHUTDOWN); |
/* initialize the parameters if not initialized */ |
{ |
TRC_PARMS m; |
trc_default_parms(m); |
212,10 → 246,13 |
trc_suspend=internal_trc_suspend; |
trc_resume=internal_trc_resume; |
/* start the tracer */ |
trc_resume(); |
return 0; |
} |
/* this function simply activates all the registered queues. |
This is usually called from the init() task!!! */ |
int TRC_init_phase2(void) |
{ |
int i; |
224,6 → 261,8 |
return 0; |
} |
/* saves the current logevent function and sets it to |
the internal_trc_logevent */ |
static int internal_trc_resume(void) |
{ |
SYS_FLAGS f; |
238,6 → 277,8 |
return ret; |
} |
/* restores the saved logevent function (initially, the logevent function is |
a dummy function) */ |
static int internal_trc_suspend(void) |
{ |
SYS_FLAGS f; |
258,8 → 299,10 |
trc_queue_t *queue; |
SYS_FLAGS f; |
/* disables interrupts (this function can also be called from a task) */ |
f=kern_fsave(); |
/* check if the event has to be logged */ |
if (eventstable[event].flags&FLAG_NOTRACE) { |
kern_frestore(f); |
return; |
266,10 → 309,11 |
} |
queue=eventstable[event].queue; |
/* gets a free event descriptor, fills it and posts it */ |
evt=queue->get(queue->data); |
if (evt!=NULL) { |
evt->event=event; |
evt->time=ll_gettime(TIME_EXACT,NULL); |
evt->time=kern_gettime(NULL); |
memcpy(&evt->x,ptr,sizeof(trc_allevents_t)); |
queue->post(queue->data); |
} |
283,6 → 327,10 |
* |
*/ |
/* this set of functions can be used to enable or disable the tracing of |
single events and classes. They make use of the classtable structure, |
which is used to discriminate the indexes occupied by every class */ |
int classtable[TRC_NUMCLASSES+1]={ |
TRC_F_TRACER, |
TRC_F_SYSTEM, |
353,21 → 401,29 |
{ |
int qf,qc; |
int res; |
/* initialize the tracer */ |
res=TRC_init_phase1(NULL); |
if (res) return res; |
/* register two kinds of queues, fixed and circular */ |
res=trc_register_circular_queue(); |
if (res) return res; |
res=trc_register_fixed_queue(); |
if (res) return res; |
/* creates two queues: |
a circular queue for the system events, |
a fixed queue |
*/ |
qc=trc_create_queue(TRC_CIRCULAR_QUEUE,NULL); |
qf=trc_create_queue(TRC_FIXED_QUEUE,NULL); |
if (qc==-1||qf==-1) return -97; |
/* We want to trace all the system events */ |
res=trc_trace_class(TRC_CLASS_SYSTEM); |
if (res) return res; |
/* All the system events must be traced into the circular queue */ |
res=trc_assign_class_to_queue(TRC_CLASS_SYSTEM,qc); |
if (res) return res; |
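Taken together, the trace.c hunks above describe a two-phase bring-up: register queue types, create queue instances, then route event classes to queues. A condensed sketch of that sequence, using only the function and constant names that appear in this diff (the wrapping function name is ours, kernel context is assumed, and error handling is abbreviated):

```c
/* sketch only: the calls are the ones shown in the trace.c hunks above */
int tracer_setup(void)
{
  int qc, qf;

  if (TRC_init_phase1(NULL))                       /* tables, sink queue,  */
    return -1;                                     /* shutdown hook...     */

  trc_register_circular_queue();                   /* register queue TYPES */
  trc_register_fixed_queue();

  qc = trc_create_queue(TRC_CIRCULAR_QUEUE, NULL); /* create INSTANCES     */
  qf = trc_create_queue(TRC_FIXED_QUEUE, NULL);
  if (qc == -1 || qf == -1)
    return -97;

  trc_trace_class(TRC_CLASS_SYSTEM);               /* enable a class       */
  trc_assign_class_to_queue(TRC_CLASS_SYSTEM, qc); /* route it to a queue  */
  return 0;                                        /* TRC_init_phase2()    */
}                                                  /* then runs in init()  */
```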
/shark/tags/rel_0_4/kernel/modules/posix.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: posix.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: posix.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the scheduling module compatible with POSIX |
63,10 → 63,10 |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
/*+ Status used in the level +*/ |
#define POSIX_READY MODULE_STATUS_BASE |
#define POSIX_DELAY MODULE_STATUS_BASE+1 |
/*+ the level redefinition for the Round Robin level +*/ |
typedef struct { |
73,8 → 73,10 |
level_des l; /*+ the standard level descriptor +*/ |
int nact[MAX_PROC]; /*+ number of pending activations +*/ |
int priority[MAX_PROC]; /*+ priority of each task +*/ |
QQUEUE *ready; /*+ the ready queue array +*/ |
IQUEUE *ready; /*+ the ready queue array +*/ |
int slice; /*+ the level's time slice +*/ |
87,73 → 89,11 |
} POSIX_level_des; |
static char *POSIX_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case POSIX_READY: return "POSIX_Ready"; |
case POSIX_DELAY: return "POSIX_Delay"; |
default : return "POSIX_Unknown"; |
} |
} |
/*+ this function is called when a task finishes its delay +*/ |
static void POSIX_timer_delay(void *par) |
{ |
PID p = (PID) par; |
POSIX_level_des *lev; |
lev = (POSIX_level_des *)level_table[proc_table[p].task_level]; |
proc_table[p].status = POSIX_READY; |
qq_insertlast(p,&lev->ready[proc_table[p].priority]); |
proc_table[p].delay_timer = NIL; /* Paranoia */ |
// kern_printf(" DELAY TIMER %d ", p); |
event_need_reschedule(); |
} |
static int POSIX_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l)) |
return 0; |
else |
return -1; |
} |
static int POSIX_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
return -1; |
} |
static void POSIX_level_status(LEVEL l) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
PID p; |
kern_printf("Slice: %d \n", lev->slice); |
for (p=0; p<MAX_PROC; p++) |
if (proc_table[p].task_level == l && proc_table[p].status != POSIX_READY |
&& proc_table[p].status != FREE ) |
kern_printf("Pid: %d\t Name: %20s Prio: %3ld Status: %s\n", |
p,proc_table[p].name, |
proc_table[p].priority, |
POSIX_status_to_a(proc_table[p].status)); |
} |
/* This is not efficient but very fair :-) |
All this stuff is needed because if a task executes for a long time |
due to (shadow!) priority inheritance, then the task shall go to the |
tail of the queue many times... */ |
static PID POSIX_level_scheduler(LEVEL l) |
static PID POSIX_public_scheduler(LEVEL l) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
164,7 → 104,7 |
prio = lev->maxpriority; |
for (;;) { |
p = qq_queryfirst(&lev->ready[prio]); |
p = iq_query_first(&lev->ready[prio]); |
if (p == NIL) { |
if (prio) { |
prio--; |
177,8 → 117,8 |
if ((proc_table[p].control & CONTROL_CAP) && |
(proc_table[p].avail_time <= 0)) { |
proc_table[p].avail_time += proc_table[p].wcet; |
qq_extract(p,&lev->ready[prio]); |
qq_insertlast(p,&lev->ready[prio]); |
iq_extract(p,&lev->ready[prio]); |
iq_insertlast(p,&lev->ready[prio]); |
} |
else |
return p; |
185,19 → 125,15 |
} |
} |
static int POSIX_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int POSIX_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
/* the POSIX level always guarantees... the function is defined because |
there can be an aperiodic server at a level with lower priority than |
the POSIX one that needs a guarantee (e.g., a TBS server) */ |
return 1; |
} |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
NRT_TASK_MODEL *nrt; |
if (m->pclass != NRT_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
static int POSIX_task_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
NRT_TASK_MODEL *nrt = (NRT_TASK_MODEL *)m; |
nrt = (NRT_TASK_MODEL *)m; |
/* the task state is set to SLEEP by the general task_create */ |
208,7 → 144,7 |
proc_table[exec_shadow].task_level == l) { |
/* We inherit the scheduling properties if the scheduling level |
*is* the same */ |
proc_table[p].priority = proc_table[exec_shadow].priority; |
lev->priority[p] = lev->priority[exec_shadow]; |
proc_table[p].avail_time = proc_table[exec_shadow].avail_time; |
proc_table[p].wcet = proc_table[exec_shadow].wcet; |
219,7 → 155,7 |
lev->nact[p] = (lev->nact[exec_shadow] == -1) ? -1 : 0; |
} |
else { |
proc_table[p].priority = nrt->weight; |
lev->priority[p] = nrt->weight; |
if (nrt->slice) { |
proc_table[p].avail_time = nrt->slice; |
242,54 → 178,23 |
return 0; /* OK */ |
} |
static void POSIX_task_detach(LEVEL l, PID p) |
static void POSIX_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the POSIX level doesn't introduce any new field in the TASK_MODEL, |
so all the detach work is done by the task_create. |
The task state is set to FREE by the general task_create */ |
} |
static int POSIX_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void POSIX_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
/* the task state is set to EXE by the scheduler() |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
qq_extract(p, &lev->ready[proc_table[p].priority]); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds],&schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
iq_extract(p, &lev->ready[lev->priority[p]]); |
} |
static void POSIX_task_epilogue(LEVEL l, PID p) |
static void POSIX_public_epilogue(LEVEL l, PID p) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
if (lev->yielding) { |
lev->yielding = 0; |
qq_insertlast(p,&lev->ready[proc_table[p].priority]); |
iq_insertlast(p,&lev->ready[lev->priority[p]]); |
} |
/* check if the slice is finished and insert the task in the correct |
queue position */ |
296,15 → 201,15 |
else if (proc_table[p].control & CONTROL_CAP && |
proc_table[p].avail_time <= 0) { |
proc_table[p].avail_time += proc_table[p].wcet; |
qq_insertlast(p,&lev->ready[proc_table[p].priority]); |
iq_insertlast(p,&lev->ready[lev->priority[p]]); |
} |
else |
qq_insertfirst(p,&lev->ready[proc_table[p].priority]); |
iq_insertfirst(p,&lev->ready[lev->priority[p]]); |
proc_table[p].status = POSIX_READY; |
} |
static void POSIX_task_activate(LEVEL l, PID p) |
static void POSIX_public_activate(LEVEL l, PID p) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
316,26 → 221,24 |
return; |
} |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
/* Insert task in the correct position */ |
proc_table[p].status = POSIX_READY; |
qq_insertlast(p,&lev->ready[proc_table[p].priority]); |
iq_insertlast(p,&lev->ready[lev->priority[p]]); |
} |
static void POSIX_task_insert(LEVEL l, PID p) |
static void POSIX_public_unblock(LEVEL l, PID p) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
/* Similar to POSIX_task_activate, but we don't check in what state |
the task is and we don't set the request_time */ |
the task is */ |
/* Insert task in the correct position */ |
proc_table[p].status = POSIX_READY; |
qq_insertlast(p,&lev->ready[proc_table[p].priority]); |
iq_insertlast(p,&lev->ready[lev->priority[p]]); |
} |
static void POSIX_task_extract(LEVEL l, PID p) |
static void POSIX_public_block(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extracted it from the ready queue at dispatch time. |
347,22 → 250,26 |
*/ |
} |
static void POSIX_task_endcycle(LEVEL l, PID p) |
static int POSIX_public_message(LEVEL l, PID p, void *m) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
if (lev->nact[p] > 0) { |
/* continue!!!! */ |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
lev->nact[p]--; |
qq_insertfirst(p,&lev->ready[proc_table[p].priority]); |
iq_insertfirst(p,&lev->ready[lev->priority[p]]); |
proc_table[p].status = POSIX_READY; |
} |
else |
proc_table[p].status = SLEEP; |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
return 0; |
} |
static void POSIX_task_end(LEVEL l, PID p) |
static void POSIX_public_end(LEVEL l, PID p) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
370,69 → 277,9 |
/* then, we insert the task in the free queue */ |
proc_table[p].status = FREE; |
q_insert(p,&freedesc); |
iq_priority_insert(p,&freedesc); |
} |
static void POSIX_task_sleep(LEVEL l, PID p) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
lev->nact[p] = 0; |
proc_table[p].status = SLEEP; |
} |
static void POSIX_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
// POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
struct timespec wakeuptime; |
/* equal to POSIX_task_endcycle */ |
proc_table[p].status = POSIX_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT,&wakeuptime); |
ADDUSEC2TIMESPEC(usdelay,&wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
POSIX_timer_delay, |
(void *)p); |
} |
static int POSIX_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void POSIX_guest_detach(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_epilogue(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_activate(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_insert(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_extract(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_end(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
/* Registration functions */ |
/*+ This init function installs the "main" task +*/ |
463,7 → 310,7 |
if (p == NIL) |
printk("\nPanic!!! can't create main task...\n"); |
POSIX_task_activate(lev,p); |
POSIX_public_activate(lev,p); |
} |
471,7 → 318,7 |
TIME slice the slice for the Round Robin queue |
int createmain 1 if the level creates the main task 0 otherwise |
struct multiboot_info *mb used if createmain specified +*/ |
void POSIX_register_level(TIME slice, |
LEVEL POSIX_register_level(TIME slice, |
int createmain, |
struct multiboot_info *mb, |
int prioritylevels) |
483,55 → 330,23 |
printk("POSIX_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(POSIX_level_des)); |
printk(" alloco descrittore %d %d\n",l,(int)sizeof(POSIX_level_des)); |
lev = (POSIX_level_des *)level_table[l]; |
/* alloc the space needed for the POSIX_level_des */ |
lev = (POSIX_level_des *)kern_alloc(sizeof(POSIX_level_des)); |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, POSIX_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = POSIX_LEVEL_CODE; |
lev->l.level_version = POSIX_LEVEL_VERSION; |
lev->l.public_scheduler = POSIX_public_scheduler; |
lev->l.public_create = POSIX_public_create; |
lev->l.public_end = POSIX_public_end; |
lev->l.public_dispatch = POSIX_public_dispatch; |
lev->l.public_epilogue = POSIX_public_epilogue; |
lev->l.public_activate = POSIX_public_activate; |
lev->l.public_unblock = POSIX_public_unblock; |
lev->l.public_block = POSIX_public_block; |
lev->l.public_message = POSIX_public_message; |
lev->l.level_accept_task_model = POSIX_level_accept_task_model; |
lev->l.level_accept_guest_model = POSIX_level_accept_guest_model; |
lev->l.level_status = POSIX_level_status; |
lev->l.level_scheduler = POSIX_level_scheduler; |
lev->l.level_guarantee = POSIX_level_guarantee; |
lev->l.task_create = POSIX_task_create; |
lev->l.task_detach = POSIX_task_detach; |
lev->l.task_eligible = POSIX_task_eligible; |
lev->l.task_dispatch = POSIX_task_dispatch; |
lev->l.task_epilogue = POSIX_task_epilogue; |
lev->l.task_activate = POSIX_task_activate; |
lev->l.task_insert = POSIX_task_insert; |
lev->l.task_extract = POSIX_task_extract; |
lev->l.task_endcycle = POSIX_task_endcycle; |
lev->l.task_end = POSIX_task_end; |
lev->l.task_sleep = POSIX_task_sleep; |
lev->l.task_delay = POSIX_task_delay; |
lev->l.guest_create = POSIX_guest_create; |
lev->l.guest_detach = POSIX_guest_detach; |
lev->l.guest_dispatch = POSIX_guest_dispatch; |
lev->l.guest_epilogue = POSIX_guest_epilogue; |
lev->l.guest_activate = POSIX_guest_activate; |
lev->l.guest_insert = POSIX_guest_insert; |
lev->l.guest_extract = POSIX_guest_extract; |
lev->l.guest_endcycle = POSIX_guest_endcycle; |
lev->l.guest_end = POSIX_guest_end; |
lev->l.guest_sleep = POSIX_guest_sleep; |
lev->l.guest_delay = POSIX_guest_delay; |
/* fill the POSIX descriptor part */ |
for (i = 0; i < MAX_PROC; i++) |
lev->nact[i] = -1; |
538,10 → 353,10 |
lev->maxpriority = prioritylevels -1; |
lev->ready = (QQUEUE *)kern_alloc(sizeof(QQUEUE) * prioritylevels); |
lev->ready = (IQUEUE *)kern_alloc(sizeof(IQUEUE) * prioritylevels); |
for (x = 0; x < prioritylevels; x++) |
qq_init(&lev->ready[x]); |
iq_init(&lev->ready[x], &freedesc, 0); |
if (slice < POSIX_MINIMUM_SLICE) slice = POSIX_MINIMUM_SLICE; |
if (slice > POSIX_MAXIMUM_SLICE) slice = POSIX_MAXIMUM_SLICE; |
551,6 → 366,8 |
if (createmain) |
sys_atrunlevel(POSIX_call_main,(void *) l, RUNLEVEL_INIT); |
return l; |
} |
/*+ this function forces the running task to go to its queue tail; |
559,13 → 376,6 |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
if (l < 0 || l >= sched_levels) |
return -1; |
if (level_table[l]->level_code != POSIX_LEVEL_CODE || |
level_table[l]->level_version != POSIX_LEVEL_VERSION ) |
return -1; |
if (proc_table[exec_shadow].task_level != l) |
return -1; |
596,13 → 406,6 |
returns ENOSYS or ESRCH if there are problems +*/ |
int POSIX_getschedparam(LEVEL l, PID p, int *policy, int *priority) |
{ |
if (l < 0 || l >= sched_levels) |
return ENOSYS; |
if (level_table[l]->level_code != POSIX_LEVEL_CODE || |
level_table[l]->level_version != POSIX_LEVEL_VERSION ) |
return ENOSYS; |
if (p<0 || p>= MAX_PROC || proc_table[p].status == FREE) |
return ESRCH; |
614,7 → 417,7 |
else |
*policy = NRT_FIFO_POLICY; |
*priority = proc_table[p].priority; |
*priority = ((POSIX_level_des *)(level_table[l]))->priority[p]; |
return 0; |
} |
624,13 → 427,6 |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
if (l < 0 || l >= sched_levels) |
return ENOSYS; |
if (level_table[l]->level_code != POSIX_LEVEL_CODE || |
level_table[l]->level_version != POSIX_LEVEL_VERSION ) |
return ENOSYS; |
if (p<0 || p>= MAX_PROC || proc_table[p].status == FREE) |
return ESRCH; |
644,14 → 440,14 |
else |
return EINVAL; |
if (proc_table[p].priority != priority) { |
if (lev->priority[p] != priority) { |
if (proc_table[p].status == POSIX_READY) { |
qq_extract(p,&lev->ready[proc_table[p].priority]); |
proc_table[p].priority = priority; |
qq_insertlast(p,&lev->ready[priority]); |
iq_extract(p,&lev->ready[lev->priority[p]]); |
lev->priority[p] = priority; |
iq_insertlast(p,&lev->ready[priority]); |
} |
else |
proc_table[p].priority = priority; |
lev->priority[p] = priority; |
} |
return 0; |
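POSIX_public_scheduler above walks the priority levels from lev->maxpriority down, and within a level it rotates any task whose time slice ran out (recharging avail_time by wcet) rather than dispatching it; the comment in the file calls this "not efficient but very fair". A self-contained toy of the rotate-or-pick step for a single level, with invented task values (an illustration only, not the kernel's code):

```c
#include <stdio.h>

#define NIL (-1)

typedef struct { int pid; int avail; int wcet; } task_t;

/* one ready level modelled as a tiny array-backed FIFO */
static task_t q[4] = { {1, 0, 3}, {2, 2, 3}, {3, 1, 3} };
static int    n    = 3;

static int pick(void)
{
  for (;;) {
    if (n == 0)
      return NIL;                 /* level empty: caller tries a lower prio */
    if (q[0].avail <= 0) {        /* slice used up: recharge and rotate     */
      task_t t = q[0];
      t.avail += t.wcet;
      for (int i = 1; i < n; i++)
        q[i - 1] = q[i];
      q[n - 1] = t;
    } else
      return q[0].pid;            /* first task that still has budget       */
  }
}

int main(void)
{
  printf("dispatch pid %d\n", pick());  /* task 1 is rotated, task 2 runs */
  return 0;
}
```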
/shark/tags/rel_0_4/kernel/modules/pc.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: pc.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: pc.c,v 1.2 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
Priority Ceiling protocol. See pc.h for more details... |
57,7 → 57,6 |
#include <ll/string.h> |
#include <ll/stdio.h> |
#include <kernel/const.h> |
#include <modules/codes.h> |
#include <sys/types.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
153,7 → 152,7 |
} |
#if 0 |
/*+ print resource protocol statistics...+*/ |
static void PC_resource_status(RLEVEL r) |
{ |
172,23 → 171,24 |
// in the future: print the status of the blocked semaphores! |
} |
#endif |
static int PC_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
PC_mutex_resource_des *m = (PC_mutex_resource_des *)(resource_table[l]); |
PC_RES_MODEL *pc; |
static int PC_level_accept_resource_model(RLEVEL l, RES_MODEL *r) |
{ |
if (r->rclass == PC_RCLASS || r->rclass == (PC_RCLASS | l) ) |
return 0; |
else |
if (r->rclass != PC_RCLASS) |
return -1; |
} |
if (r->level && r->level !=l) |
return -1; |
static void PC_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
PC_mutex_resource_des *m = (PC_mutex_resource_des *)(resource_table[l]); |
PC_RES_MODEL *pc = (PC_RES_MODEL *)r; |
pc = (PC_RES_MODEL *)r; |
m->priority[p] = pc->priority; |
m->nlocked[p] = 0; |
return 0; |
} |
static void PC_res_detach(RLEVEL l, PID p) |
203,18 → 203,13 |
m->priority[p] = MAX_DWORD; |
} |
static int PC_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a) |
{ |
if (a->mclass == PC_MCLASS || a->mclass == (PC_MCLASS | l) ) |
return 0; |
else |
return -1; |
} |
static int PC_init(RLEVEL l, mutex_t *m, const mutexattr_t *a) |
{ |
PC_mutex_t *p; |
if (a->mclass != PC_MCLASS) |
return -1; |
p = (PC_mutex_t *) kern_alloc(sizeof(PC_mutex_t)); |
/* control if there is enough memory; no control on init on a |
403,7 → 398,7 |
return 0; |
} |
void PC_register_module(void) |
RLEVEL PC_register_module(void) |
{ |
RLEVEL l; /* the level that we register */ |
PC_mutex_resource_des *m; /* for readability only */ |
421,20 → 416,11 |
resource_table[l] = (resource_des *)m; |
/* fill the resource_des descriptor */ |
strncpy(m->m.r.res_name, PC_MODULENAME, MAX_MODULENAME); |
m->m.r.res_code = PC_MODULE_CODE; |
m->m.r.res_version = PC_MODULE_VERSION; |
m->m.r.rtype = MUTEX_RTYPE; |
m->m.r.resource_status = PC_resource_status; |
m->m.r.level_accept_resource_model = PC_level_accept_resource_model; |
m->m.r.res_register = PC_res_register; |
m->m.r.res_detach = PC_res_detach; |
/* fill the mutex_resource_des descriptor */ |
m->m.level_accept_mutexattr = PC_level_accept_mutexattr; |
m->m.init = PC_init; |
m->m.destroy = PC_destroy; |
m->m.lock = PC_lock; |
447,6 → 433,8 |
m->mlist = NULL; |
return l; |
} |
/*+ This function gets the ceiling of a PC mutex, and it has to be called |
461,11 → 449,6 |
r = resource_table[mutex->mutexlevel]; |
if (r->rtype != MUTEX_RTYPE || |
r->res_code != PC_MODULE_CODE || |
r->res_version != PC_MODULE_VERSION) |
return -1; |
if (ceiling) |
*ceiling = ((PC_mutex_t *)mutex->opt)->ceiling; |
else |
486,11 → 469,6 |
r = resource_table[mutex->mutexlevel]; |
if (r->rtype != MUTEX_RTYPE || |
r->res_code != PC_MODULE_CODE || |
r->res_version != PC_MODULE_VERSION) |
return -1; |
if (old_ceiling) |
*old_ceiling = ((PC_mutex_t *)mutex->opt)->ceiling; |
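For context on what pc.c enforces around these hunks: under the Priority Ceiling protocol a task may lock a free mutex only if its priority is strictly higher than the ceiling of every mutex currently held by other tasks. A self-contained toy of that admission test; the "smaller number = more important" convention is an assumption on our part (suggested by PC_res_detach resetting priorities to MAX_DWORD above), and all values are invented:

```c
#include <stdio.h>

#define NMUTEX 3
#define NONE   0xffffffffu   /* like MAX_DWORD: "no ceiling/priority" */

static unsigned ceiling[NMUTEX]   = { 1, 2, 5 };
static int      locked_by[NMUTEX] = { -1, 0, -1 };  /* mutex 1 held by task 0 */

/* a task may lock iff it is strictly more important (numerically smaller)
   than the minimum ceiling among mutexes held by OTHER tasks */
static int can_lock(int task, unsigned prio)
{
  unsigned sysceil = NONE;
  for (int i = 0; i < NMUTEX; i++)
    if (locked_by[i] != -1 && locked_by[i] != task && ceiling[i] < sysceil)
      sysceil = ceiling[i];
  return prio < sysceil;
}

int main(void)
{
  printf("task 1, prio 1: %s\n", can_lock(1, 1) ? "lock" : "block");
  printf("task 2, prio 3: %s\n", can_lock(2, 3) ? "lock" : "block");
  return 0;
}
```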
/shark/tags/rel_0_4/kernel/modules/bd_edf.c |
---|
38,11 → 38,11 |
*/ |
/* |
* CVS : $Id: bd_edf.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
* CVS : $Id: bd_edf.c,v 1.2 2003-01-07 17:07:50 pj Exp $ |
* |
* File: $File$ |
* Revision: $Revision: 1.1.1.1 $ |
* Last update: $Date: 2002-03-29 14:12:52 $ |
* Revision: $Revision: 1.2 $ |
* Last update: $Date: 2003-01-07 17:07:50 $ |
*/ |
#include <modules/bd_edf.h> |
51,7 → 51,6 |
#include <ll/string.h> |
#include <ll/stdio.h> |
#include <kernel/const.h> |
#include <modules/codes.h> |
#include <sys/types.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
74,12 → 73,21 |
return -1; |
} |
static void res_register(RLEVEL l, PID p, RES_MODEL *r) |
static int res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
bd_edf_resource_des *m=(bd_edf_resource_des*)(resource_table[l]); |
BDEDF_RES_MODEL *rm=(BDEDF_RES_MODEL*)r; |
BDEDF_RES_MODEL *rm; |
if (r->rclass!=BDEDF_RCLASS) |
return -1; |
if (r->level && r->level !=l) |
return -1; |
rm=(BDEDF_RES_MODEL*)r; |
assertk(mylevel==l); |
m->dl[p]=rm->dl; |
return 0; |
} |
static void res_detach(RLEVEL l, PID p) |
89,10 → 97,7 |
m->dl[p]=0; |
} |
static void res_resource_status(void) |
{} |
void BD_EDF_register_module(void) |
RLEVEL BD_EDF_register_module(void) |
{ |
RLEVEL l; |
bd_edf_resource_des *m; |
108,12 → 113,7 |
resource_table[l]=(resource_des*)m; |
/* fill the resource_des descriptor */ |
strcpy(m->rd.res_name,BDEDF_MODULENAME); |
m->rd.res_code=BDEDF_MODULE_CODE; |
m->rd.res_version=BDEDF_MODULE_VERSION; |
m->rd.rtype=DEFAULT_RTYPE; |
m->rd.resource_status=res_resource_status; |
m->rd.level_accept_resource_model=res_level_accept_resource_model; |
m->rd.res_register=res_register; |
m->rd.res_detach=res_detach; |
121,6 → 121,8 |
assertk(mylevel==-1); |
mylevel=l; |
return l; |
} |
TIME bd_edf_getdl(void) |
/shark/tags/rel_0_4/kernel/modules/srp.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: srp.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: srp.c,v 1.3 2003-01-07 17:07:51 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:51 $ |
------------ |
Stack Resource Policy. See srp.h for general details... |
141,7 → 141,6 |
#include <ll/string.h> |
#include <ll/stdio.h> |
#include <kernel/const.h> |
#include <modules/codes.h> |
#include <sys/types.h> |
#include <kernel/descr.h> |
#include <kernel/var.h> |
385,27 → 384,14 |
} |
/*+ print resource protocol statistics...+*/ |
static void SRP_resource_status(RLEVEL r) |
static int SRP_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
kern_printf("SRP status not implemented yet"); |
} |
SRP_mutex_resource_des *m = (SRP_mutex_resource_des *)(resource_table[l]); |
static int SRP_level_accept_resource_model(RLEVEL l, RES_MODEL *r) |
{ |
if (r->rclass == SRP_RCLASS || r->rclass == (SRP_RCLASS | l) || |
r->rclass == SRP2_RCLASS || r->rclass == (SRP2_RCLASS | l)) |
return 0; |
else |
if (r->level && r->level !=l) |
return -1; |
} |
static void SRP_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
SRP_mutex_resource_des *m = (SRP_mutex_resource_des *)(resource_table[l]); |
if (r->rclass == SRP_RCLASS || r->rclass == (SRP_RCLASS | l)) { |
if (r->rclass == SRP_RCLASS) { |
/* SRP_RES_MODEL resource model */ |
// kern_printf("!%d %d",((SRP_RES_MODEL *)r)->preempt,p); |
429,14 → 415,15 |
} |
m->nlocked[p] = 0; |
return 0; |
} |
else { |
else if (r->rclass == SRP2_RCLASS) { |
/* a mutex passed via SRP_useres() */ |
SRP_mutex_t *mut = (SRP_mutex_t *)r; |
if (mut->use[p]) |
/* the mutex is already registered, do nothing! */ |
return; |
return -1; |
/* register the mutex for the task */ |
mut->use[p] = 1; |
449,7 → 436,10 |
mut->ceiling = m->proc_preempt[p].preempt; |
} |
return 0; |
} |
else |
return -1; |
} |
static void SRP_res_detach(RLEVEL l, PID p) |
488,14 → 478,6 |
SRP_extract_tasklist(m, p); |
} |
static int SRP_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a) |
{ |
if (a->mclass == SRP_MCLASS || a->mclass == (SRP_MCLASS | l) ) |
return 0; |
else |
return -1; |
} |
static int SRP_init(RLEVEL l, mutex_t *m, const mutexattr_t *a) |
{ |
SRP_mutex_resource_des *lev = (SRP_mutex_resource_des *)(resource_table[l]); |
502,6 → 484,9 |
SRP_mutex_t *p; |
PID x; |
if (a->mclass != SRP_MCLASS) |
return -1; |
p = (SRP_mutex_t *) kern_alloc(sizeof(SRP_mutex_t)); |
/* control if there is enough memory; no control on init on a |
595,7 → 580,7 |
// lev, mut->owner, |
// mut->use[exec_shadow], |
// lev->proc_preempt[exec_shadow].preempt,exec_shadow); |
kern_raise(XSRP_UNVALID_LOCK, exec_shadow); |
kern_raise(XSRP_INVALID_LOCK, exec_shadow); |
kern_sti(); |
return (EINVAL); |
} |
719,7 → 704,7 |
/* activate the task if it was activated while in lobby list! */ |
if (task_unblock_activation(x)) { |
LEVEL sl = proc_table[x].task_level; |
level_table[sl]->task_activate(sl,x); |
level_table[sl]->public_activate(sl,x); |
// kern_printf("activate it!!!"); |
} |
} |
736,7 → 721,7 |
return 0; |
} |
void SRP_register_module(void) |
RLEVEL SRP_register_module(void) |
{ |
RLEVEL l; /* the level that we register */ |
SRP_mutex_resource_des *m; /* for readability only */ |
754,20 → 739,11 |
resource_table[l] = (resource_des *)m; |
/* fill the resource_des descriptor */ |
strncpy(m->m.r.res_name, SRP_MODULENAME, MAX_MODULENAME); |
m->m.r.res_code = SRP_MODULE_CODE; |
m->m.r.res_version = SRP_MODULE_VERSION; |
m->m.r.rtype = MUTEX_RTYPE; |
m->m.r.resource_status = SRP_resource_status; |
m->m.r.level_accept_resource_model = SRP_level_accept_resource_model; |
m->m.r.res_register = SRP_res_register; |
m->m.r.res_detach = SRP_res_detach; |
/* fill the mutex_resource_des descriptor */ |
m->m.level_accept_mutexattr = SRP_level_accept_mutexattr; |
m->m.init = SRP_init; |
m->m.destroy = SRP_destroy; |
m->m.lock = SRP_lock; |
789,5 → 765,7 |
m->srpstack = NULL; |
m->srprecalc = NULL; |
m->srplist = NULL; |
return l; |
} |
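One pattern runs through the whole changeset: EDF_register_level, POSIX_register_level, PC_register_module, SRP_register_module and BD_EDF_register_module all change from returning void to returning the allocated LEVEL/RLEVEL, presumably so init code can keep the handle instead of inferring indices from registration order. A hypothetical init-time fragment under that reading (the flag names are the ones from the edf.c hunks; the wrapping function is invented):

```c
/* hypothetical system init fragment */
void register_levels(void)
{
  LEVEL  edf;
  RLEVEL srp;

  edf = EDF_register_level(EDF_ENABLE_WCET_CHECK | EDF_ENABLE_GUARANTEE);
  srp = SRP_register_module();

  /* the handles can now be used directly, e.g. with the level-specific
     call shown in the edf.c hunks above */
  (void)srp;
  (void)EDF_usedbandwidth(edf);
}
```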
/shark/tags/rel_0_4/kernel/modules/rr2.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: rr2.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: rr2.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the scheduling module RR2 (Round Robin) version 2 |
60,10 → 60,10 |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
/*+ Status used in the level +*/ |
#define RR2_READY MODULE_STATUS_BASE |
#define RR2_DELAY MODULE_STATUS_BASE+1 |
/*+ the level redefinition for the Round Robin level +*/ |
typedef struct { |
71,7 → 71,7 |
int nact[MAX_PROC]; /*+ number of pending activations +*/ |
QQUEUE ready; /*+ the ready queue +*/ |
IQUEUE ready; /*+ the ready queue +*/ |
int slice; /*+ the level's time slice +*/ |
80,77 → 80,11 |
} RR2_level_des; |
static char *RR2_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case RR2_READY: return "RR2_Ready"; |
case RR2_DELAY: return "RR2_Delay"; |
default : return "RR2_Unknown"; |
} |
} |
/*+ this function is called when a task finishes its delay +*/ |
static void RR2_timer_delay(void *par) |
{ |
PID p = (PID) par; |
RR2_level_des *lev; |
lev = (RR2_level_des *)level_table[proc_table[p].task_level]; |
proc_table[p].status = RR2_READY; |
qq_insertlast(p,&lev->ready); |
proc_table[p].delay_timer = NIL; /* Paranoia */ |
// kern_printf(" DELAY TIMER %d ", p); |
event_need_reschedule(); |
} |
static int RR2_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l)) |
return 0; |
else |
return -1; |
} |
static int RR2_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
return -1; |
} |
static void RR2_level_status(LEVEL l) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
PID p = qq_queryfirst(&lev->ready); |
kern_printf("Slice: %d \n", lev->slice); |
while (p != NIL) { |
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name, |
RR2_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
} |
for (p=0; p<MAX_PROC; p++) |
if (proc_table[p].task_level == l && proc_table[p].status != RR2_READY |
&& proc_table[p].status != FREE ) |
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name, |
RR2_status_to_a(proc_table[p].status)); |
} |
/* This is not efficient, but it is very fair :-)
All this stuff is needed because, if a task executes for a long time
due to (shadow!) priority inheritance, then the task has to go to the
tail of the queue many times... (a standalone sketch of this rotation
follows the scheduler below) */
static PID RR2_level_scheduler(LEVEL l) |
static PID RR2_public_scheduler(LEVEL l) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
157,14 → 91,14 |
PID p; |
for (;;) { |
p = qq_queryfirst(&lev->ready); |
p = iq_query_first(&lev->ready); |
if (p == -1) |
return p; |
if (proc_table[p].avail_time <= 0) { |
proc_table[p].avail_time += proc_table[p].wcet; |
qq_extract(p,&lev->ready); |
qq_insertlast(p,&lev->ready); |
iq_extract(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
else |
return p; |
171,20 → 105,15 |
} |
} |
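/* [editor's sketch, not part of the commit] A standalone model of the
   rotation implemented above: an exhausted task is recharged with one
   slice (wcet) and moved to the tail, so a task that ran long under
   shadow inheritance is requeued once per slice it consumed. All names
   here are illustrative: */
int example_pick_next(int avail[], int wcet[], int q[], int n)
{
  int i, p;
  if (n <= 0) return -1;           /* empty queue, like p == -1 above */
  for (;;) {
    p = q[0];
    if (avail[p] > 0)
      return p;                    /* the head still owns part of a slice */
    avail[p] += wcet[p];           /* recharge one slice... */
    for (i = 0; i < n - 1; i++)    /* ...and rotate p to the tail */
      q[i] = q[i + 1];
    q[n - 1] = p;
  }
}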
static int RR2_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int RR2_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
/* the RR2 level always guarantees... the function is defined because
there can be an aperiodic server at a level with less priority than
the RR2 that needs a guarantee (e.g., a TBS server) */
return 1; |
} |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
NRT_TASK_MODEL *nrt; |
if (m->pclass != NRT_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
nrt = (NRT_TASK_MODEL *)m; |
static int RR2_task_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
NRT_TASK_MODEL *nrt = (NRT_TASK_MODEL *)m; |
/* the task state is set to SLEEP by the general task_create;
the only things left to set are the capacity fields, which are set
to the values passed in the model... */
210,48 → 139,17 |
return 0; /* OK */ |
} |
static void RR2_task_detach(LEVEL l, PID p) |
static void RR2_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the RR2 level doesn't introduce any new field in the TASK_MODEL,
so all the detach work is done by the task_create.
The task state is set to FREE by the general task_create */
} |
static int RR2_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void RR2_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
/* the task state is set to EXE by the scheduler()
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
qq_extract(p, &lev->ready); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds],&schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
iq_extract(p, &lev->ready); |
} |
static void RR2_task_epilogue(LEVEL l, PID p) |
static void RR2_public_epilogue(LEVEL l, PID p) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
259,16 → 157,16 |
qqueue position */ |
if (proc_table[p].avail_time <= 0) { |
proc_table[p].avail_time += proc_table[p].wcet; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
else |
/* curr is > 0, so the running task has to run for another curr usec */
qq_insertfirst(p,&lev->ready); |
iq_insertfirst(p,&lev->ready); |
proc_table[p].status = RR2_READY; |
} |
static void RR2_task_activate(LEVEL l, PID p) |
static void RR2_public_activate(LEVEL l, PID p) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
280,26 → 178,24 |
return; |
} |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
/* Insert task in the correct position */
proc_table[p].status = RR2_READY; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
static void RR2_task_insert(LEVEL l, PID p) |
static void RR2_public_unblock(LEVEL l, PID p) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
/* Similar to RR2_task_activate, but we don't check in what state |
the task is and we don't set the request_time */ |
/* Similar to RR2_task_activate, |
but we don't check in what state the task is */ |
/* Insert task in the correct position */
proc_table[p].status = RR2_READY; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
static void RR2_task_extract(LEVEL l, PID p) |
static void RR2_public_block(LEVEL l, PID p) |
{ |
/* Extract the running task from the level.
We have already extracted it from the ready queue at dispatch time.
311,22 → 207,26 |
*/ |
} |
static void RR2_task_endcycle(LEVEL l, PID p) |
static int RR2_public_message(LEVEL l, PID p, void *m) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
if (lev->nact[p] > 0) { |
/* continue!!!! */ |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
lev->nact[p]--; |
qq_insertfirst(p,&lev->ready); |
iq_insertfirst(p,&lev->ready); |
proc_table[p].status = RR2_READY; |
} |
else |
proc_table[p].status = SLEEP; |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
return 0; |
} |
static void RR2_task_end(LEVEL l, PID p) |
static void RR2_public_end(LEVEL l, PID p) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
334,69 → 234,9 |
/* then, we insert the task in the free queue */ |
proc_table[p].status = FREE; |
q_insert(p,&freedesc); |
iq_insertlast(p,&freedesc); |
} |
static void RR2_task_sleep(LEVEL l, PID p) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
if (lev->nact[p] >= 0) lev->nact[p] = 0; |
proc_table[p].status = SLEEP; |
} |
static void RR2_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
// RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
struct timespec wakeuptime; |
/* equal to RR2_task_endcycle */ |
proc_table[p].status = RR2_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT,&wakeuptime); |
ADDUSEC2TIMESPEC(usdelay,&wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
RR2_timer_delay, |
(void *)p); |
} |
static int RR2_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void RR2_guest_detach(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_epilogue(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_activate(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_insert(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_extract(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_end(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
/* Registration functions */ |
/*+ This init function install the "main" task +*/ |
424,7 → 264,7 |
if (p == NIL) |
printk("\nPanic!!! can't create main task...\n"); |
RR2_task_activate(lev,p); |
RR2_public_activate(lev,p); |
} |
432,11 → 272,11 |
TIME slice the slice for the Round Robin queue |
int createmain 1 if the level creates the main task 0 otherwise |
struct multiboot_info *mb used if createmain specified +*/ |
void RR2_register_level(TIME slice, |
LEVEL RR2_register_level(TIME slice, |
int createmain, |
struct multiboot_info *mb) |
{ |
LEVEL l; /* the level that we register */ |
LEVEL l; /* the level that we register */ |
RR2_level_des *lev; /* for readability only */
PID i; |
443,57 → 283,28 |
printk("RR2_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(RR2_level_des)); |
/* alloc the space needed for the RR2_level_des */ |
lev = (RR2_level_des *)kern_alloc(sizeof(RR2_level_des)); |
lev = (RR2_level_des *)level_table[l]; |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, RR2_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = RR2_LEVEL_CODE; |
lev->l.level_version = RR2_LEVEL_VERSION; |
lev->l.public_scheduler = RR2_public_scheduler; |
lev->l.public_create = RR2_public_create; |
lev->l.public_end = RR2_public_end; |
lev->l.public_dispatch = RR2_public_dispatch; |
lev->l.public_epilogue = RR2_public_epilogue; |
lev->l.public_activate = RR2_public_activate; |
lev->l.public_unblock = RR2_public_unblock; |
lev->l.public_block = RR2_public_block; |
lev->l.public_message = RR2_public_message; |
lev->l.level_accept_task_model = RR2_level_accept_task_model; |
lev->l.level_accept_guest_model = RR2_level_accept_guest_model; |
lev->l.level_status = RR2_level_status; |
lev->l.level_scheduler = RR2_level_scheduler; |
lev->l.level_guarantee = RR2_level_guarantee; |
lev->l.task_create = RR2_task_create; |
lev->l.task_detach = RR2_task_detach; |
lev->l.task_eligible = RR2_task_eligible; |
lev->l.task_dispatch = RR2_task_dispatch; |
lev->l.task_epilogue = RR2_task_epilogue; |
lev->l.task_activate = RR2_task_activate; |
lev->l.task_insert = RR2_task_insert; |
lev->l.task_extract = RR2_task_extract; |
lev->l.task_endcycle = RR2_task_endcycle; |
lev->l.task_end = RR2_task_end; |
lev->l.task_sleep = RR2_task_sleep; |
lev->l.task_delay = RR2_task_delay; |
lev->l.guest_create = RR2_guest_create; |
lev->l.guest_detach = RR2_guest_detach; |
lev->l.guest_dispatch = RR2_guest_dispatch; |
lev->l.guest_epilogue = RR2_guest_epilogue; |
lev->l.guest_activate = RR2_guest_activate; |
lev->l.guest_insert = RR2_guest_insert; |
lev->l.guest_extract = RR2_guest_extract; |
lev->l.guest_endcycle = RR2_guest_endcycle; |
lev->l.guest_end = RR2_guest_end; |
lev->l.guest_sleep = RR2_guest_sleep; |
lev->l.guest_delay = RR2_guest_delay; |
/* fill the RR2 descriptor part */ |
for (i = 0; i < MAX_PROC; i++) |
lev->nact[i] = -1; |
qq_init(&lev->ready); |
iq_init(&lev->ready, &freedesc, 0); |
if (slice < RR2_MINIMUM_SLICE) slice = RR2_MINIMUM_SLICE; |
if (slice > RR2_MAXIMUM_SLICE) slice = RR2_MAXIMUM_SLICE; |
503,6 → 314,8 |
if (createmain) |
sys_atrunlevel(RR2_call_main,(void *) l, RUNLEVEL_INIT); |
return l; |
} |
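/* [editor's sketch, not part of the commit] Assumed usage of the new
   registration signature; the 10 ms slice is an arbitrary example and is
   clamped to RR2_MINIMUM_SLICE/RR2_MAXIMUM_SLICE as shown above: */
void example_rr2_init(struct multiboot_info *mb)
{
  LEVEL rr2;
  rr2 = RR2_register_level(10000, 1 /* createmain */, mb);
  (void)rr2;   /* the LEVEL can be stored into NRT task models */
}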
/shark/tags/rel_0_4/kernel/modules/ds.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: ds.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: ds.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the aperiodic server DS (Deferrable Server) |
64,6 → 64,7 |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
/*+ Status used in the level +*/ |
#define DS_WAIT APER_STATUS_BASE /*+ waiting the service +*/ |
83,7 → 84,7 |
int Cs; /*+ server capacity +*/ |
int availCs; /*+ server avail time +*/ |
QQUEUE wait; /*+ the wait queue of the DS +*/ |
IQUEUE wait; /*+ the wait queue of the DS +*/ |
PID activated; /*+ the task inserted in another queue +*/ |
int flags; /*+ the init flags... +*/ |
106,8 → 107,7 |
m = lev->scheduling_level; |
job_task_default_model(j,lev->lastdline); |
job_task_def_period(j,lev->period); |
level_table[m]->guest_create(m,p,(TASK_MODEL *)&j); |
level_table[m]->guest_activate(m,p); |
level_table[m]->private_insert(m,p,(TASK_MODEL *)&j); |
// kern_printf("(%d %d)",lev->lastdline.tv_sec,lev->lastdline.tv_nsec); |
} |
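/* [editor's note, illustrative] The change above collapses the old
   guest_create + guest_activate pair into a single private_insert: the
   served task is wrapped in a JOB_TASK_MODEL carrying the server deadline
   and period and handed to the master level in one step. In isolation
   (names assumed): */
void example_serve(LEVEL m, PID p, struct timespec dline, int period)
{
  JOB_TASK_MODEL j;
  job_task_default_model(j, dline);   /* deadline of the server */
  job_task_def_period(j, period);     /* period of the server */
  level_table[m]->private_insert(m, p, (TASK_MODEL *)&j);
}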
128,8 → 128,8 |
was not any other task to be put in the ready queue |
... we are now activating the next task */ |
if (lev->availCs > 0 && lev->activated == NIL) { |
if (qq_queryfirst(&lev->wait) != NIL) { |
lev->activated = qq_getfirst(&lev->wait); |
if (iq_query_first(&lev->wait) != NIL) { |
lev->activated = iq_getfirst(&lev->wait); |
DS_activation(lev); |
event_need_reschedule(); |
} |
139,80 → 139,8 |
// kern_printf("!"); |
} |
static char *DS_status_to_a(WORD status) |
static PID DS_public_schedulerbackground(LEVEL l) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case DS_WAIT : return "DS_Wait"; |
default : return "DS_Unknown"; |
} |
} |
static int DS_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l) ) { |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; |
if (s->periodicity == APERIODIC) |
return 0; |
} |
return -1; |
} |
static int DS_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
return -1; |
} |
static char *onoff(int i) |
{ |
if (i) |
return "On "; |
else |
return "Off"; |
} |
static void DS_level_status(LEVEL l) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
PID p = qq_queryfirst(&lev->wait); |
kern_printf("On-line guarantee : %s\n", |
onoff(lev->flags & DS_ENABLE_GUARANTEE_EDF || |
lev->flags & DS_ENABLE_GUARANTEE_RM )); |
kern_printf("Used Bandwidth : %u/%u\n", |
lev->U, MAX_BANDWIDTH); |
if (lev->activated != -1) |
kern_printf("Activated: Pid: %2d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n", |
lev->activated, |
proc_table[lev->activated].name, |
proc_table[lev->activated].timespec_priority.tv_sec, |
proc_table[lev->activated].timespec_priority.tv_nsec, |
lev->nact[lev->activated], |
DS_status_to_a(proc_table[lev->activated].status)); |
while (p != NIL) { |
kern_printf("Pid: %2d Name: %10s Stat: %s\n", |
p, |
proc_table[p].name, |
DS_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
} |
} |
static PID DS_level_scheduler(LEVEL l) |
{ |
/* the DS doesn't schedule anything...
it's an EDF level or similar that does it! */
return NIL; |
} |
static PID DS_level_schedulerbackground(LEVEL l) |
{ |
/* the DS catches the background time to exec aperiodic activities */
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
221,11 → 149,11 |
if (lev->flags & DS_BACKGROUND_BLOCK) |
return NIL; |
else |
return qq_queryfirst(&lev->wait); |
return iq_query_first(&lev->wait); |
} |
/* The on-line guarantee is enabled only if the appropriate flag is set... */ |
static int DS_level_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth) |
static int DS_public_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
237,7 → 165,7 |
return 0; |
} |
static int DS_level_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth) |
static int DS_public_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
249,14 → 177,19 |
return 0; |
} |
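/* [editor's sketch; the guarantee bodies are elided in this diff] A
   guarantee function of this family typically checks the server bandwidth
   lev->U against the residual system bandwidth and reserves it: */
static int example_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  DS_level_des *lev = (DS_level_des *)(level_table[l]);
  if (*freebandwidth >= lev->U) {   /* does the server still fit? */
    *freebandwidth -= lev->U;       /* reserve its bandwidth */
    return 1;                       /* admission test passed */
  }
  return 0;                         /* not schedulable */
}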
static int DS_task_create(LEVEL l, PID p, TASK_MODEL *m) |
static int DS_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
/* if the DS_task_create is called, then the pclass must be a |
valid pclass. */ |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; |
SOFT_TASK_MODEL *s; |
if (m->pclass != SOFT_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
s = (SOFT_TASK_MODEL *)m; |
if (s->periodicity != APERIODIC) return -1; |
s = (SOFT_TASK_MODEL *)m; |
if (s->arrivals == SAVE_ARRIVALS) |
lev->nact[p] = 0; |
else |
265,26 → 198,8 |
return 0; /* OK, also if the task cannot be guaranteed... */ |
} |
static void DS_task_detach(LEVEL l, PID p) |
static void DS_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the DS level doesn't introduce any dynamically allocated new field. */
} |
static int DS_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void DS_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
struct timespec ty; |
294,13 → 209,13 |
to exe before calling task_dispatch. we have to check |
lev->activated != p instead */ |
if (lev->activated != p) { |
qq_extract(p, &lev->wait); |
iq_extract(p, &lev->wait); |
//kern_printf("#%d#",p); |
} |
else { |
//if (nostop) kern_printf("(gd status=%d)",proc_table[p].status); |
level_table[ lev->scheduling_level ]-> |
guest_dispatch(lev->scheduling_level,p,nostop); |
private_dispatch(lev->scheduling_level,p,nostop); |
} |
/* set the capacity timer */ |
311,19 → 226,9 |
} |
// kern_printf("(disp %d %d)",ty.tv_sec, ty.tv_nsec); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
} |
static void DS_task_epilogue(LEVEL l, PID p) |
static void DS_public_epilogue(LEVEL l, PID p) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
struct timespec ty; |
355,8 → 260,8 |
task point the shadow to it!!!*/ |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
qq_insertfirst(p, &lev->wait); |
private_extract(lev->scheduling_level,p); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = DS_WAIT; |
lev->activated = NIL; |
} |
365,14 → 270,14 |
wait queue by calling the guest_epilogue... */ |
if (lev->activated == p) {//kern_printf("Û1"); |
level_table[ lev->scheduling_level ]-> |
guest_epilogue(lev->scheduling_level,p); |
private_epilogue(lev->scheduling_level,p); |
} else { //kern_printf("Û2"); |
qq_insertfirst(p, &lev->wait); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = DS_WAIT; |
} |
} |
static void DS_task_activate(LEVEL l, PID p) |
static void DS_public_activate(LEVEL l, PID p) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
381,7 → 286,6 |
lev->nact[p]++; |
} |
else if (proc_table[p].status == SLEEP) { |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
if (lev->activated == NIL && lev->availCs > 0) { |
lev->activated = p; |
388,7 → 292,7 |
DS_activation(lev); |
} |
else { |
qq_insertlast(p, &lev->wait); |
iq_insertlast(p, &lev->wait); |
proc_table[p].status = DS_WAIT; |
} |
} |
398,7 → 302,7 |
} |
static void DS_task_insert(LEVEL l, PID p) |
static void DS_public_unblock(LEVEL l, PID p) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
408,11 → 312,11 |
/* when we reinsert the task into the system, the server capacity
is always 0 because nobody has executed with the DS before... */
qq_insertfirst(p, &lev->wait); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = DS_WAIT; |
} |
static void DS_task_extract(LEVEL l, PID p) |
static void DS_public_block(LEVEL l, PID p) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
423,10 → 327,10 |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
} |
static void DS_task_endcycle(LEVEL l, PID p) |
static int DS_public_message(LEVEL l, PID p, void *m) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
struct timespec ty; |
443,52 → 347,30 |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
else |
qq_extract(p, &lev->wait); |
iq_extract(p, &lev->wait); |
if (lev->nact[p] > 0) |
{ |
lev->nact[p]--; |
qq_insertlast(p, &lev->wait); |
iq_insertlast(p, &lev->wait); |
proc_table[p].status = DS_WAIT; |
} |
else |
proc_table[p].status = SLEEP; |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated != NIL) |
DS_activation(lev); |
} |
static void DS_task_end(LEVEL l, PID p) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
struct timespec ty; |
TIME tx; |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
/* update the server capacity */ |
if (lev->flags & DS_BACKGROUND) |
lev->flags &= ~DS_BACKGROUND; |
else { |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
lev->availCs -= tx; |
} |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
proc_table[p].status = FREE; |
q_insertfirst(p,&freedesc); |
lev->activated = qq_getfirst(&lev->wait); |
if (lev->activated != NIL) |
DS_activation(lev); |
return 0; |
} |
static void DS_task_sleep(LEVEL l, PID p) |
static void DS_public_end(LEVEL l, PID p) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
struct timespec ty; |
503,78 → 385,18 |
lev->availCs -= tx; |
} |
if (lev->nact[p] >= 0) lev->nact[p] = 0; |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
else |
qq_extract(p, &lev->wait); |
private_extract(lev->scheduling_level,p); |
proc_table[p].status = SLEEP; |
proc_table[p].status = FREE; |
iq_insertfirst(p,&freedesc); |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated != NIL) |
DS_activation(lev); |
} |
static void DS_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
struct timespec ty; |
TIME tx; |
/* update the server capacity */ |
if (lev->flags & DS_BACKGROUND) |
lev->flags &= ~DS_BACKGROUND; |
else { |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
lev->availCs -= tx; |
} |
/* I hope no delay when owning a mutex... */ |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_delay(lev->scheduling_level,p,usdelay); |
} |
static int DS_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void DS_guest_detach(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_epilogue(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_activate(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_insert(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_extract(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_end(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
/* Registration functions */ |
584,7 → 406,7 |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[(LEVEL)l]); |
ll_gettime(TIME_EXACT,&lev->lastdline); |
kern_gettime(&lev->lastdline); |
ADDUSEC2TIMESPEC(lev->period, &lev->lastdline); |
kern_event_post(&lev->lastdline, DS_deadline_timer, l); |
594,7 → 416,7 |
/*+ Registration function: |
int flags the init flags ... see DS.h +*/ |
void DS_register_level(int flags, LEVEL master, int Cs, int per) |
LEVEL DS_register_level(int flags, LEVEL master, int Cs, int per) |
{ |
LEVEL l; /* the level that we register */ |
DS_level_des *lev; /* for readability only */
603,64 → 425,33 |
printk("DS_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(DS_level_des)); |
printk(" alloco descrittore %d %d\n",l,(int)sizeof(DS_level_des)); |
lev = (DS_level_des *)level_table[l]; |
/* alloc the space needed for the DS_level_des */ |
lev = (DS_level_des *)kern_alloc(sizeof(DS_level_des)); |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, DS_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = DS_LEVEL_CODE; |
lev->l.level_version = DS_LEVEL_VERSION; |
lev->l.level_accept_task_model = DS_level_accept_task_model; |
lev->l.level_accept_guest_model = DS_level_accept_guest_model; |
lev->l.level_status = DS_level_status; |
if (flags & DS_ENABLE_BACKGROUND) |
lev->l.level_scheduler = DS_level_schedulerbackground; |
else |
lev->l.level_scheduler = DS_level_scheduler; |
lev->l.public_scheduler = DS_public_schedulerbackground; |
if (flags & DS_ENABLE_GUARANTEE_EDF) |
lev->l.level_guarantee = DS_level_guaranteeEDF; |
lev->l.public_guarantee = DS_public_guaranteeEDF; |
else if (flags & DS_ENABLE_GUARANTEE_RM) |
lev->l.level_guarantee = DS_level_guaranteeRM; |
lev->l.public_guarantee = DS_public_guaranteeRM; |
else |
lev->l.level_guarantee = NULL; |
lev->l.public_guarantee = NULL; |
lev->l.task_create = DS_task_create; |
lev->l.task_detach = DS_task_detach; |
lev->l.task_eligible = DS_task_eligible; |
lev->l.task_dispatch = DS_task_dispatch; |
lev->l.task_epilogue = DS_task_epilogue; |
lev->l.task_activate = DS_task_activate; |
lev->l.task_insert = DS_task_insert; |
lev->l.task_extract = DS_task_extract; |
lev->l.task_endcycle = DS_task_endcycle; |
lev->l.task_end = DS_task_end; |
lev->l.task_sleep = DS_task_sleep; |
lev->l.task_delay = DS_task_delay; |
lev->l.public_create = DS_public_create; |
lev->l.public_end = DS_public_end; |
lev->l.public_dispatch = DS_public_dispatch; |
lev->l.public_epilogue = DS_public_epilogue; |
lev->l.public_activate = DS_public_activate; |
lev->l.public_unblock = DS_public_unblock; |
lev->l.public_block = DS_public_block; |
lev->l.public_message = DS_public_message; |
lev->l.guest_create = DS_guest_create; |
lev->l.guest_detach = DS_guest_detach; |
lev->l.guest_dispatch = DS_guest_dispatch; |
lev->l.guest_epilogue = DS_guest_epilogue; |
lev->l.guest_activate = DS_guest_activate; |
lev->l.guest_insert = DS_guest_insert; |
lev->l.guest_extract = DS_guest_extract; |
lev->l.guest_endcycle = DS_guest_endcycle; |
lev->l.guest_end = DS_guest_end; |
lev->l.guest_sleep = DS_guest_sleep; |
lev->l.guest_delay = DS_guest_delay; |
/* fill the DS descriptor part */ |
for (i=0; i<MAX_PROC; i++) |
671,7 → 462,7 |
lev->period = per; |
qq_init(&lev->wait); |
iq_init(&lev->wait, &freedesc, 0); |
lev->activated = NIL; |
lev->U = (MAX_BANDWIDTH / per) * Cs; |
681,15 → 472,13 |
lev->flags = flags & 0x07; |
sys_atrunlevel(DS_dline_install,(void *) l, RUNLEVEL_INIT); |
return l; |
} |
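/* [editor's sketch, not part of the commit] Assumed usage: the DS is
   registered on top of a master level (e.g. the LEVEL returned by
   EDF_register_level); the numbers are placeholders: */
void example_ds_init(LEVEL master)
{
  LEVEL ds;
  /* 2 ms of capacity every 100 ms, with background service and
     EDF on-line guarantee enabled */
  ds = DS_register_level(DS_ENABLE_BACKGROUND | DS_ENABLE_GUARANTEE_EDF,
                         master, 2000, 100000);
  (void)ds;
}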
bandwidth_t DS_usedbandwidth(LEVEL l) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
if (lev->l.level_code == DS_LEVEL_CODE && |
lev->l.level_version == DS_LEVEL_VERSION) |
return lev->U; |
else |
return 0; |
return lev->U; |
} |
/shark/tags/rel_0_4/kernel/modules/cbs.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: cbs.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: cbs.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the aperiodic server CBS (Constant Bandwidth Server)
76,7 → 76,6 |
/*+ Status used in the level +*/ |
#define CBS_IDLE APER_STATUS_BASE /*+ waiting the activation +*/ |
#define CBS_ZOMBIE APER_STATUS_BASE+1 /*+ waiting the period end +*/ |
#define CBS_DELAY APER_STATUS_BASE+2 /*+ waiting the delay end +*/ |
/*+ task flags +*/ |
#define CBS_SAVE_ARRIVALS 1 |
173,26 → 172,9 |
job_task_default_model(job, lev->cbs_dline[p]); |
job_task_def_noexc(job); |
level_table[ lev->scheduling_level ]-> |
guest_create(lev->scheduling_level, p, (TASK_MODEL *)&job); |
level_table[ lev->scheduling_level ]-> |
guest_activate(lev->scheduling_level, p); |
private_insert(lev->scheduling_level, p, (TASK_MODEL *)&job); |
} |
static char *CBS_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case CBS_IDLE : return "CBS_Idle"; |
case CBS_ZOMBIE : return "CBS_Zombie"; |
case CBS_DELAY : return "CBS_Delay"; |
default : return "CBS_Unknown"; |
} |
} |
static void CBS_avail_time_check(CBS_level_des *lev, PID p) |
{ |
/* there is a while because if the wcet is << the system tick
253,20 → 235,6 |
} |
/*+ this function is called when a task finishes its delay +*/
static void CBS_timer_delay(void *par) |
{ |
PID p = (PID) par; |
CBS_level_des *lev; |
lev = (CBS_level_des *)level_table[proc_table[p].task_level]; |
CBS_activation(lev,p,&proc_table[p].timespec_priority); |
event_need_reschedule(); |
} |
/*+ this function is called when a killed or ended task reaches the
period end +*/
static void CBS_timer_zombie(void *par) |
278,7 → 246,7 |
/* we finally put the task in the ready queue */ |
proc_table[p].status = FREE; |
q_insertfirst(p,&freedesc); |
iq_insertfirst(p,&freedesc); |
/* and free the allocated bandwidth */ |
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet; |
286,60 → 254,8 |
} |
static int CBS_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l)) { |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; |
if (s->met && s->period) |
return 0; |
} |
return -1; |
} |
static int CBS_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
return -1; |
} |
static char *onoff(int i) |
{ |
if (i) |
return "On "; |
else |
return "Off"; |
} |
static void CBS_level_status(LEVEL l) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
PID p; |
kern_printf("On-line guarantee : %s\n", |
onoff(lev->flags & CBS_ENABLE_GUARANTEE)); |
kern_printf("Used Bandwidth : %u/%u\n", |
lev->U, MAX_BANDWIDTH); |
for (p=0; p<MAX_PROC; p++) |
if (proc_table[p].task_level == l && proc_table[p].status != FREE ) |
kern_printf("Pid: %2d Name: %10s Period: %9ld Dline: %9ld.%6ld Stat: %s\n", |
p, |
proc_table[p].name, |
lev->period[p], |
lev->cbs_dline[p].tv_sec, |
lev->cbs_dline[p].tv_nsec/1000, |
CBS_status_to_a(proc_table[p].status)); |
} |
static PID CBS_level_scheduler(LEVEL l) |
{ |
/* the CBS doesn't schedule anything...
it's an EDF level or similar that does it! */
return NIL; |
} |
/* The on-line guarantee is enabled only if the appropriate flag is set... */ |
static int CBS_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int CBS_public_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
356,14 → 272,18 |
return 0; |
} |
static int CBS_task_create(LEVEL l, PID p, TASK_MODEL *m) |
static int CBS_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
SOFT_TASK_MODEL *soft; |
/* if the CBS_task_create is called, then the pclass must be a |
valid pclass. */ |
SOFT_TASK_MODEL *soft = (SOFT_TASK_MODEL *)m; |
if (m->pclass != SOFT_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
soft = (SOFT_TASK_MODEL *)m; |
if (!(soft->met && soft->period)) return -1; |
soft = (SOFT_TASK_MODEL *)m; |
/* Enable wcet check */ |
proc_table[p].avail_time = soft->met; |
proc_table[p].wcet = soft->met; |
399,7 → 319,7 |
return 0; /* OK, also if the task cannot be guaranteed... */ |
} |
static void CBS_task_detach(LEVEL l, PID p) |
static void CBS_public_detach(LEVEL l, PID p) |
{ |
/* the CBS level doesn't introduce any dynamically allocated new field.
we only have to reset the NO_GUARANTEE field and decrement the allocated
413,7 → 333,7 |
lev->U -= (MAX_BANDWIDTH / lev->period[p]) * proc_table[p].wcet; |
} |
static int CBS_task_eligible(LEVEL l, PID p) |
static int CBS_public_eligible(LEVEL l, PID p) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
JOB_TASK_MODEL job; |
428,7 → 348,7 |
if ( TIMESPEC_A_LT_B(&lev->cbs_dline[p], &schedule_time) ) { |
/* we kill the current activation */ |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level, p); |
private_extract(lev->scheduling_level, p); |
/* we modify the deadline ... */ |
TIMESPEC_ASSIGN(&lev->cbs_dline[p], &schedule_time); |
441,9 → 361,7 |
job_task_default_model(job, lev->cbs_dline[p]); |
job_task_def_noexc(job); |
level_table[ lev->scheduling_level ]-> |
guest_create(lev->scheduling_level, p, (TASK_MODEL *)&job); |
level_table[ lev->scheduling_level ]-> |
guest_activate(lev->scheduling_level, p); |
private_insert(lev->scheduling_level, p, (TASK_MODEL *)&job); |
return -1; |
} |
451,32 → 369,14 |
return 0; |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void CBS_task_dispatch(LEVEL l, PID p, int nostop) |
static void CBS_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
level_table[ lev->scheduling_level ]-> |
guest_dispatch(lev->scheduling_level,p,nostop); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
private_dispatch(lev->scheduling_level,p,nostop); |
} |
static void CBS_task_epilogue(LEVEL l, PID p) |
static void CBS_public_epilogue(LEVEL l, PID p) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
JOB_TASK_MODEL job; |
485,7 → 385,7 |
if ( proc_table[p].avail_time <= 0) { |
/* we kill the current activation */ |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level, p); |
private_extract(lev->scheduling_level, p); |
/* we modify the deadline according to rule 4 ... */ |
CBS_avail_time_check(lev, p); |
494,9 → 394,7 |
job_task_default_model(job, lev->cbs_dline[p]); |
job_task_def_noexc(job); |
level_table[ lev->scheduling_level ]-> |
guest_create(lev->scheduling_level, p, (TASK_MODEL *)&job); |
level_table[ lev->scheduling_level ]-> |
guest_activate(lev->scheduling_level, p); |
private_insert(lev->scheduling_level, p, (TASK_MODEL *)&job); |
// kern_printf("epil : dl %d per %d p %d |\n", |
// lev->cbs_dline[p].tv_nsec/1000,lev->period[p],p); |
505,12 → 403,13 |
/* the task has been preempted. it returns into the ready queue by |
calling the guest_epilogue... */ |
level_table[ lev->scheduling_level ]-> |
guest_epilogue(lev->scheduling_level,p); |
private_epilogue(lev->scheduling_level,p); |
} |
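/* [editor's note, illustrative] "Rule 4" above is the classic CBS budget
   rule: when the budget is exhausted, the deadline is postponed by one
   period and the budget is recharged. A minimal sketch of what
   CBS_avail_time_check is expected to do (the loop matters when the wcet
   is much smaller than the system tick, as its comment says): */
void example_cbs_recharge(CBS_level_des *lev, PID p)
{
  while (proc_table[p].avail_time <= 0) {
    ADDUSEC2TIMESPEC(lev->period[p], &lev->cbs_dline[p]); /* d = d + T */
    proc_table[p].avail_time += proc_table[p].wcet;       /* q = q + Q */
  }
}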
static void CBS_task_activate(LEVEL l, PID p) |
static void CBS_public_activate(LEVEL l, PID p) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
struct timespec t; |
/* save activation (only if needed... */ |
if (proc_table[p].status != SLEEP) { |
519,9 → 418,9 |
return; |
} |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
kern_gettime(&t); |
CBS_activation(lev, p, &proc_table[p].request_time); |
CBS_activation(lev, p, &t); |
/* Set the reactivation timer */ |
if (!(lev->flag[p] & CBS_APERIODIC)) |
530,7 → 429,7 |
the deadline may be != from actual_time + period |
(if we call the task_activate after a task_sleep, and the |
deadline was postponed a lot...) */ |
TIMESPEC_ASSIGN(&lev->reactivation_time[p], &proc_table[p].request_time); |
TIMESPEC_ASSIGN(&lev->reactivation_time[p], &t); |
ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]); |
// TIMESPEC_ASSIGN(&lev->reactivation_time[p], &lev->cbs_dline[p]); |
lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p], |
543,17 → 442,17 |
// kern_printf("act : %d %d |",lev->cbs_dline[p].tv_nsec/1000,p); |
} |
static void CBS_task_insert(LEVEL l, PID p) |
static void CBS_public_unblock(LEVEL l, PID p) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
struct timespec acttime; |
ll_gettime(TIME_EXACT, &acttime); |
kern_gettime(&acttime); |
CBS_activation(lev,p,&acttime); |
} |
static void CBS_task_extract(LEVEL l, PID p) |
static void CBS_public_block(LEVEL l, PID p) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
561,10 → 460,10 |
CBS_avail_time_check(lev, p); |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
} |
static void CBS_task_endcycle(LEVEL l, PID p) |
static int CBS_public_message(LEVEL l, PID p, void *m) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
573,24 → 472,27 |
if (lev->nact[p]) { |
/* continue!!!! */ |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
lev->nact[p]--; |
level_table[ lev->scheduling_level ]-> |
guest_epilogue(lev->scheduling_level,p); |
private_epilogue(lev->scheduling_level,p); |
} |
else { |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
if (lev->flag[p] & CBS_APERIODIC) |
proc_table[p].status = SLEEP; |
else /* the task is soft_periodic */ |
proc_table[p].status = CBS_IDLE; |
} |
} |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
return 0; |
} |
static void CBS_task_end(LEVEL l, PID p) |
static void CBS_public_end(LEVEL l, PID p) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
598,11 → 500,11 |
CBS_avail_time_check(lev, p); |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
/* we delete the reactivation timer */ |
if (!(lev->flag[p] & CBS_APERIODIC)) { |
event_delete(lev->reactivation_timer[p]); |
kern_event_delete(lev->reactivation_timer[p]); |
lev->reactivation_timer[p] = -1; |
} |
614,98 → 516,11 |
(void *)p); |
} |
static void CBS_task_sleep(LEVEL l, PID p) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
/* check if the wcet is finished... */ |
CBS_avail_time_check(lev, p); |
/* a task activation is finished, but we are using a JOB_TASK_MODEL |
that implements a single activation, so we have to call |
the guest_end, that represents a single activation... */
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
/* we delete the reactivation timer */ |
if (!(lev->flag[p] & CBS_APERIODIC)) { |
event_delete(lev->reactivation_timer[p]); |
lev->reactivation_timer[p] = -1; |
} |
proc_table[p].status = SLEEP; |
/* the sleep forgets pending activations... */ |
lev->nact[p] = 0; |
} |
static void CBS_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
struct timespec wakeuptime; |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
/* check if the wcet is finished... */ |
CBS_avail_time_check(lev, p); |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
proc_table[p].status = CBS_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT, &wakeuptime); |
ADDUSEC2TIMESPEC(usdelay, &wakeuptime); |
/* the timespec_priority field is used to store the time at which the delay
timer fires */
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, &wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
CBS_timer_delay, |
(void *)p); |
} |
static int CBS_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void CBS_guest_detach(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_epilogue(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_activate(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_insert(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_extract(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_end(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
/* Registration functions */ |
/*+ Registration function: |
int flags the init flags ... see CBS.h +*/ |
void CBS_register_level(int flags, LEVEL master) |
LEVEL CBS_register_level(int flags, LEVEL master) |
{ |
LEVEL l; /* the level that we register */ |
CBS_level_des *lev; /* for readability only */
714,58 → 529,28 |
printk("CBS_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(CBS_level_des)); |
printk(" alloco descrittore %d %d\n",l,(int)sizeof(CBS_level_des)); |
lev = (CBS_level_des *)level_table[l]; |
/* alloc the space needed for the CBS_level_des */ |
lev = (CBS_level_des *)kern_alloc(sizeof(CBS_level_des)); |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, CBS_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = CBS_LEVEL_CODE; |
lev->l.level_version = CBS_LEVEL_VERSION; |
lev->l.level_accept_task_model = CBS_level_accept_task_model; |
lev->l.level_accept_guest_model = CBS_level_accept_guest_model; |
lev->l.level_status = CBS_level_status; |
lev->l.level_scheduler = CBS_level_scheduler; |
if (flags & CBS_ENABLE_GUARANTEE) |
lev->l.level_guarantee = CBS_level_guarantee; |
lev->l.public_guarantee = CBS_public_guarantee; |
else |
lev->l.level_guarantee = NULL; |
lev->l.public_guarantee = NULL; |
lev->l.public_create = CBS_public_create; |
lev->l.public_detach = CBS_public_detach; |
lev->l.public_end = CBS_public_end; |
lev->l.public_eligible = CBS_public_eligible; |
lev->l.public_dispatch = CBS_public_dispatch; |
lev->l.public_epilogue = CBS_public_epilogue; |
lev->l.public_activate = CBS_public_activate; |
lev->l.public_unblock = CBS_public_unblock; |
lev->l.public_block = CBS_public_block; |
lev->l.public_message = CBS_public_message; |
lev->l.task_create = CBS_task_create; |
lev->l.task_detach = CBS_task_detach; |
lev->l.task_eligible = CBS_task_eligible; |
lev->l.task_dispatch = CBS_task_dispatch; |
lev->l.task_epilogue = CBS_task_epilogue; |
lev->l.task_activate = CBS_task_activate; |
lev->l.task_insert = CBS_task_insert; |
lev->l.task_extract = CBS_task_extract; |
lev->l.task_endcycle = CBS_task_endcycle; |
lev->l.task_end = CBS_task_end; |
lev->l.task_sleep = CBS_task_sleep; |
lev->l.task_delay = CBS_task_delay; |
lev->l.guest_create = CBS_guest_create; |
lev->l.guest_detach = CBS_guest_detach; |
lev->l.guest_dispatch = CBS_guest_dispatch; |
lev->l.guest_epilogue = CBS_guest_epilogue; |
lev->l.guest_activate = CBS_guest_activate; |
lev->l.guest_insert = CBS_guest_insert; |
lev->l.guest_extract = CBS_guest_extract; |
lev->l.guest_endcycle = CBS_guest_endcycle; |
lev->l.guest_end = CBS_guest_end; |
lev->l.guest_sleep = CBS_guest_sleep; |
lev->l.guest_delay = CBS_guest_delay; |
/* fill the CBS descriptor part */ |
for (i=0; i<MAX_PROC; i++) { |
NULL_TIMESPEC(&lev->cbs_dline[i]); |
782,16 → 567,15 |
lev->scheduling_level = master; |
lev->flags = flags & 0x01; |
return l; |
} |
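/* [editor's sketch, not part of the commit] A task served by the CBS is
   created from a SOFT_TASK_MODEL with met and period set, as
   CBS_public_create now requires; the soft_task_def_* helper names are
   assumed from the usual model conventions: */
void example_cbs_task_model(void)
{
  SOFT_TASK_MODEL m;
  soft_task_default_model(m);
  soft_task_def_met(m, 2000);       /* mean execution time: 2 ms */
  soft_task_def_period(m, 100000);  /* reservation period: 100 ms */
  /* the model is then passed to task_create() together with the body */
}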
bandwidth_t CBS_usedbandwidth(LEVEL l) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
if (lev->l.level_code == CBS_LEVEL_CODE && |
lev->l.level_version == CBS_LEVEL_VERSION) |
return lev->U; |
else |
return 0; |
return lev->U; |
} |
int CBS_get_nact(LEVEL l, PID p) |
/shark/tags/rel_0_4/kernel/modules/pi.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: pi.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: pi.c,v 1.2 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
Priority Inheritance protocol. see pi.h for more details...
56,7 → 56,6 |
#include <ll/ll.h> |
#include <ll/string.h> |
#include <ll/stdio.h> |
#include <modules/codes.h> |
#include <kernel/const.h> |
#include <sys/types.h> |
#include <kernel/descr.h> |
83,6 → 82,7 |
#if 0 |
/*+ print resource protocol statistics...+*/ |
static void PI_resource_status(RLEVEL r) |
{ |
94,19 → 94,14 |
kern_printf("%-4d", m->nlocked[i]); |
} |
} |
#endif |
static int PI_level_accept_resource_model(RLEVEL l, RES_MODEL *r) |
static int PI_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
/* priority inheritance works with all tasks without Resource parameters */ |
return -1; |
} |
static void PI_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
/* never called!!! */ |
} |
static void PI_res_detach(RLEVEL l, PID p) |
{ |
PI_mutex_resource_des *m = (PI_mutex_resource_des *)(resource_table[l]); |
115,18 → 110,13 |
kern_raise(XMUTEX_OWNER_KILLED, p); |
} |
static int PI_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a) |
{ |
if (a->mclass == PI_MCLASS || a->mclass == (PI_MCLASS | l) ) |
return 0; |
else |
return -1; |
} |
static int PI_init(RLEVEL l, mutex_t *m, const mutexattr_t *a) |
{ |
PI_mutex_t *p; |
if (a->mclass != PI_MCLASS) |
return -1; |
p = (PI_mutex_t *) kern_alloc(sizeof(PI_mutex_t)); |
/* check if there is enough memory; no check on init on a
299,7 → 289,7 |
return 0; |
} |
void PI_register_module(void) |
RLEVEL PI_register_module(void) |
{ |
RLEVEL l; /* the level that we register */ |
PI_mutex_resource_des *m; /* for readability only */
317,20 → 307,11 |
resource_table[l] = (resource_des *)m; |
/* fill the resource_des descriptor */ |
strncpy(m->m.r.res_name, PI_MODULENAME, MAX_MODULENAME); |
m->m.r.res_code = PI_MODULE_CODE; |
m->m.r.res_version = PI_MODULE_VERSION; |
m->m.r.rtype = MUTEX_RTYPE; |
m->m.r.resource_status = PI_resource_status; |
m->m.r.level_accept_resource_model = PI_level_accept_resource_model; |
m->m.r.res_register = PI_res_register; |
m->m.r.res_detach = PI_res_detach; |
/* fill the mutex_resource_des descriptor */ |
m->m.level_accept_mutexattr = PI_level_accept_mutexattr; |
m->m.init = PI_init; |
m->m.destroy = PI_destroy; |
m->m.lock = PI_lock; |
342,5 → 323,7 |
m->nlocked[i] = 0; |
m->blocked[i] = NIL; |
} |
return l; |
} |
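/* [editor's sketch, not part of the commit] Assumed usage of the new
   return value: */
void example_pi_init(void)
{
  RLEVEL pi;
  pi = PI_register_module();
  (void)pi;   /* PI_init then accepts mutexattrs with mclass == PI_MCLASS */
}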
/shark/tags/rel_0_4/kernel/modules/trcfixed.c |
---|
33,6 → 33,13 |
#include <fcntl.h> |
#include <limits.h> |
/* this file implements a fixed queue, that is, simply an array that
is filled with events until it is full. After that, all further
events are discarded. */
typedef struct TAGfixed_queue_t { |
int size; |
int index; |
39,9 → 46,13 |
char *filename; |
int uniq; |
trc_event_t table[0]; |
trc_event_t table[0]; |
/* Yes, 0!... the elements are allocated
in a dirty way by the kern_alloc call in fixed_create */
} fixed_queue_t; |
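/* [editor's note, illustrative] table[0] is the old-style "struct hack":
   one kern_alloc reserves the header and the trailing event array in a
   single block, exactly as fixed_create does below. In isolation: */
fixed_queue_t *example_fixed_alloc(int size)
{
  fixed_queue_t *q = (fixed_queue_t *)
    kern_alloc(sizeof(fixed_queue_t) + sizeof(trc_event_t) * (size + 1));
  if (q != NULL) {
    q->size  = size;   /* capacity of the trailing array */
    q->index = 0;      /* next free slot */
  }
  return q;
}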
/* This function simply returns an event to fill (only if the fixed table
is not yet full) */
static trc_event_t *fixed_get(fixed_queue_t *queue) |
{ |
if (queue->index>=queue->size) return NULL; |
48,6 → 59,8 |
return &queue->table[queue->index++]; |
} |
/* since get returns the correct event address, |
the post function does nothing... */ |
static int fixed_post(fixed_queue_t *queue) |
{ |
return 0; |
60,6 → 73,7 |
{ |
fixed_queue_t *ptr; |
/* initialize the default arguments for the fixed queue */ |
if (!once) { |
/* well... this func is called when the system is not running! */ |
once=1; |
67,11 → 81,12 |
} |
if (args==NULL) args=&defaultargs; |
/* allocate the fixed queue data structure plus the array of events */ |
ptr=(fixed_queue_t*)kern_alloc(sizeof(fixed_queue_t)+ |
sizeof(trc_event_t)*(args->size+1)); |
if (ptr==NULL) return -1; |
/* set the current queue pointers and data */ |
queue->get=(trc_event_t*(*)(void*))fixed_get; |
queue->post=(int(*)(void*))fixed_post; |
queue->data=ptr; |
92,9 → 107,6 |
if (queue->filename==NULL) trc_create_name("fix",queue->uniq,pathname); |
else trc_create_name(queue->filename,0,pathname); |
//sys_status(SCHED_STATUS); |
//task_delay(250000); |
h=open("/TEMP/FIX1",O_CREAT|O_TRUNC|O_WRONLY); |
if (h!=-1) { |
write(h,queue->table,queue->index*sizeof(trc_event_t)); |
/shark/tags/rel_0_4/kernel/modules/nopm.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: nopm.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: nopm.c,v 1.3 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
See modules/nopm.h. |
58,7 → 58,6 |
#include <ll/string.h> |
#include <kernel/const.h> |
#include <sys/types.h> |
#include <modules/codes.h> |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
73,7 → 72,7 |
mutex_t structure */ |
typedef struct { |
PID owner; |
QQUEUE blocked; |
IQUEUE blocked; |
int counter; |
} NOPM_mutex_t; |
108,12 → 107,12 |
kern_printf("----------------------\n"); |
for(i=0;i<index;i++) { |
ptr=table[i]->opt; |
if (ptr->blocked.first!=NIL) { |
if (!iq_isempty(&ptr->blocked)) { |
kern_printf("%i blocks on 0x%p: ",ptr->owner,table[i]); |
j=ptr->blocked.first; |
j=iq_query_first(&ptr->blocked); |
while (j!=NIL) { |
kern_printf("%i ",(int)j); |
j=proc_table[j].next; |
j=iq_query_next(j, &ptr->blocked); |
} |
kern_printf("\n"); |
} else { |
138,40 → 137,23 |
#define NOPM_WAIT LIB_STATUS_BASE |
/*+ print resource protocol statistics...+*/ |
static void NOPM_resource_status(RLEVEL r) |
{ |
kern_printf("No status for NOPM module\n"); |
} |
static int NOPM_level_accept_resource_model(RLEVEL l, RES_MODEL *r) |
static int NOPM_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
/* the no-preemption protocol works with all tasks without Resource parameters */
return -1; |
} |
static void NOPM_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
/* never called!!! */ |
} |
static void NOPM_res_detach(RLEVEL l, PID p) |
{ |
} |
static int NOPM_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a) |
{ |
if (a->mclass == NOPM_MCLASS || a->mclass == (NOPM_MCLASS | l) ) |
return 0; |
else |
return -1; |
} |
static int NOPM_init(RLEVEL l, mutex_t *m, const mutexattr_t *a) |
{ |
NOPM_mutex_t *p; |
if (a->mclass != NOPM_MCLASS) |
return -1; |
p = (NOPM_mutex_t *) kern_alloc(sizeof(NOPM_mutex_t)); |
/* check if there is enough memory; no check on init on a
181,7 → 163,7 |
return (ENOMEM); |
p->owner = NIL; |
qq_init(&p->blocked); |
iq_init(&p->blocked, &freedesc, 0); |
p->counter=0; |
m->mutexlevel = l; |
234,27 → 216,16 |
if (p->owner != NIL) { /* We must block exec task */ |
LEVEL l; /* for readability only */
TIME tx; /* a dummy TIME for timespec operations */ |
struct timespec ty; /* a dummy timespec for timespec operations */ |
proc_table[exec_shadow].context = kern_context_save(); |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
/* we insert the task in the semaphore queue */ |
proc_table[exec_shadow].status = NOPM_WAIT; |
qq_insertlast(exec_shadow,&p->blocked); |
iq_insertlast(exec_shadow,&p->blocked); |
/* and finally we reschedule */ |
exec = exec_shadow = -1; |
327,13 → 298,13 |
/* the mutex is mine, pop the first task to extract */
for (;;) { |
e = qq_getfirst(&p->blocked); |
e = iq_getfirst(&p->blocked); |
if (e == NIL) { |
p->owner = NIL; |
break; |
} else if (proc_table[e].status == NOPM_WAIT) { |
l = proc_table[e].task_level; |
level_table[l]->task_insert(l,e); |
level_table[l]->public_unblock(l,e); |
p->counter++; |
break; |
} |
348,7 → 319,7 |
return 0; |
} |
void NOPM_register_module(void) |
RLEVEL NOPM_register_module(void) |
{ |
RLEVEL l; /* the level that we register */ |
NOPM_mutex_resource_des *m; /* for readability only */
365,20 → 336,11 |
resource_table[l] = (resource_des *)m; |
/* fill the resource_des descriptor */ |
strncpy(m->m.r.res_name, NOPM_MODULENAME, MAX_MODULENAME); |
m->m.r.res_code = NOPM_MODULE_CODE; |
m->m.r.res_version = NOPM_MODULE_VERSION; |
m->m.r.rtype = MUTEX_RTYPE; |
m->m.r.resource_status = NOPM_resource_status; |
m->m.r.level_accept_resource_model = NOPM_level_accept_resource_model; |
m->m.r.res_register = NOPM_res_register; |
m->m.r.res_detach = NOPM_res_detach; |
/* fill the mutex_resource_des descriptor */ |
m->m.level_accept_mutexattr = NOPM_level_accept_mutexattr; |
m->m.init = NOPM_init; |
m->m.destroy = NOPM_destroy; |
m->m.lock = NOPM_lock; |
385,5 → 347,6 |
m->m.trylock = NOPM_trylock; |
m->m.unlock = NOPM_unlock; |
return l; |
} |
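/* [editor's sketch, not part of the commit] With the acceptance checks
   folded into NOPM_init, a mutex is bound to this module through the
   mclass field of its attribute; the mutexattr_t layout and mutex_init
   routing are assumed: */
void example_nopm_mutex(mutex_t *mx)
{
  mutexattr_t a;
  a.mclass = NOPM_MCLASS;   /* checked by NOPM_init above */
  mutex_init(mx, &a);
}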
/shark/tags/rel_0_4/kernel/modules/bd_pscan.c |
---|
38,11 → 38,11 |
*/ |
/* |
* CVS : $Id: bd_pscan.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
* CVS : $Id: bd_pscan.c,v 1.2 2003-01-07 17:07:50 pj Exp $ |
* |
* File: $File$ |
* Revision: $Revision: 1.1.1.1 $ |
* Last update: $Date: 2002-03-29 14:12:52 $ |
* Revision: $Revision: 1.2 $ |
* Last update: $Date: 2003-01-07 17:07:50 $ |
*/ |
#include <modules/bd_pscan.h> |
51,7 → 51,6 |
#include <ll/string.h> |
#include <ll/stdio.h> |
#include <kernel/const.h> |
#include <modules/codes.h> |
#include <sys/types.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
68,21 → 67,21 |
int priority[MAX_PROC]; |
} bd_pscan_resource_des; |
static int res_level_accept_resource_model(RLEVEL l, RES_MODEL *r) |
static int res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
assertk(mylevel==l); |
if (r->rclass==BDPSCAN_RCLASS||r->rclass==(BDPSCAN_RCLASS|l)) |
return 0; |
else |
bd_pscan_resource_des *m=(bd_pscan_resource_des*)(resource_table[l]); |
BDPSCAN_RES_MODEL *rm; |
if (r->rclass!=BDPSCAN_RCLASS)
return -1; |
} |
static void res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
bd_pscan_resource_des *m=(bd_pscan_resource_des*)(resource_table[l]); |
BDPSCAN_RES_MODEL *rm=(BDPSCAN_RES_MODEL*)r; |
if (r->level && r->level !=l) |
return -1; |
rm=(BDPSCAN_RES_MODEL*)r; |
assertk(mylevel==l); |
m->priority[p]=rm->priority; |
return 0; |
} |
static void res_detach(RLEVEL l, PID p) |
92,10 → 91,7 |
m->priority[p]=LOWESTPRIORITY; |
} |
static void res_resource_status(void) |
{} |
void BD_PSCAN_register_module(void) |
RLEVEL BD_PSCAN_register_module(void) |
{ |
RLEVEL l; |
bd_pscan_resource_des *m; |
111,12 → 107,7 |
resource_table[l]=(resource_des*)m; |
/* fill the resource_des descriptor */ |
strcpy(m->rd.res_name,BDPSCAN_MODULENAME); |
m->rd.res_code=BDPSCAN_MODULE_CODE; |
m->rd.res_version=BDPSCAN_MODULE_VERSION; |
m->rd.rtype=DEFAULT_RTYPE; |
m->rd.resource_status=res_resource_status; |
m->rd.level_accept_resource_model=res_level_accept_resource_model; |
m->rd.res_register=res_register; |
m->rd.res_detach=res_detach; |
124,6 → 115,8 |
assertk(mylevel==-1); |
mylevel=l; |
return l; |
} |
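/* [editor's sketch, not part of the commit] res_register above expects a
   BDPSCAN_RES_MODEL carrying the disk priority; the exact layout of the
   embedded RES_MODEL header is assumed: */
void example_pscan_model(void)
{
  BDPSCAN_RES_MODEL r;
  r.r.rclass = BDPSCAN_RCLASS;   /* resource class checked above */
  r.r.level  = 0;                /* 0 = any registered instance */
  r.priority = 5;                /* stored in m->priority[p] */
  /* &r.r is then passed with the task's resource models at creation */
}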
int bd_pscan_getpriority(void) |
/shark/tags/rel_0_4/kernel/modules/rm.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: rm.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: rm.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the scheduling module RM (Rate Monotonic) |
41,7 → 41,7 |
**/ |
/* |
* Copyright (C) 2000 Paolo Gai |
* Copyright (C) 2000,2002 Paolo Gai |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
71,7 → 71,6 |
/*+ Status used in the level +*/ |
#define RM_READY MODULE_STATUS_BASE /*+ - Ready status +*/ |
#define RM_DELAY MODULE_STATUS_BASE+1 /*+ - Delay status +*/ |
#define RM_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/ |
#define RM_WAIT MODULE_STATUS_BASE+3 /*+ to wait the deadline +*/ |
#define RM_IDLE MODULE_STATUS_BASE+4 /*+ to wait the deadline +*/ |
94,7 → 93,7 |
/*+ used to manage the JOB_TASK_MODEL and the |
periodicity +*/ |
QUEUE ready; /*+ the ready queue +*/ |
IQUEUE ready; /*+ the ready queue +*/ |
int flags; /*+ the init flags... +*/ |
103,28 → 102,12 |
} RM_level_des; |
static char *RM_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case RM_READY : return "RM_Ready"; |
case RM_DELAY : return "RM_Delay"; |
case RM_WCET_VIOLATED: return "RM_Wcet_Violated"; |
case RM_WAIT : return "RM_Sporadic_Wait"; |
case RM_IDLE : return "RM_Idle"; |
case RM_ZOMBIE : return "RM_Zombie"; |
default : return "RM_Unknown"; |
} |
} |
static void RM_timer_deadline(void *par) |
{ |
PID p = (PID) par; |
RM_level_des *lev; |
struct timespec *temp; |
lev = (RM_level_des *)level_table[proc_table[p].task_level]; |
switch (proc_table[p].status) { |
131,7 → 114,7 |
case RM_ZOMBIE: |
/* we finally put the task in the ready queue */ |
proc_table[p].status = FREE; |
q_insertfirst(p,&freedesc); |
iq_insertfirst(p,&freedesc); |
/* and free the allocated bandwidth */ |
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet; |
break; |
140,12 → 123,11 |
/* tracer stuff */ |
trc_logevent(TRC_INTACTIVATION,&p); |
/* similar to RM_task_activate */ |
TIMESPEC_ASSIGN(&proc_table[p].request_time, |
&proc_table[p].timespec_priority); |
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority); |
temp = iq_query_timespec(p, &lev->ready); |
ADDUSEC2TIMESPEC(lev->period[p], temp); |
proc_table[p].status = RM_READY; |
q_insert(p,&lev->ready); |
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority, |
iq_priority_insert(p,&lev->ready); |
lev->deadline_timer[p] = kern_event_post(temp, |
RM_timer_deadline, |
(void *)p); |
//printk("(d%d idle priority set to %d)",p,proc_table[p].priority ); |
173,112 → 155,16 |
kern_raise(XDEADLINE_MISS,p); |
} |
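The reactivation path above advances the deadline in place: iq_query_timespec() hands back a pointer to the timespec stored with the task in the ready queue, and ADDUSEC2TIMESPEC adds the period in microseconds with nanosecond carry. A standalone sketch of that arithmetic, with a demo macro mirroring what ADDUSEC2TIMESPEC is assumed to do:

/* demo macro modeled on the kernel's ADDUSEC2TIMESPEC (an assumption) */
#include <stdio.h>
#include <time.h>

#define ADDUSEC2TIMESPEC_DEMO(us, ts)              \
  do {                                             \
    (ts)->tv_nsec += (long)(us) * 1000L;           \
    while ((ts)->tv_nsec >= 1000000000L) {         \
      (ts)->tv_nsec -= 1000000000L;                \
      (ts)->tv_sec++;                              \
    }                                              \
  } while (0)

int main(void)
{
  struct timespec dline = { 10, 999000000L };   /* 10 s + 999 ms */
  ADDUSEC2TIMESPEC_DEMO(2000, &dline);          /* advance by a 2000-us period */
  printf("next deadline: %ld.%09ld\n", (long)dline.tv_sec, dline.tv_nsec);
  return 0;                                     /* prints 11.000001000 */
}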
/*+ this function is called when a task finishes its delay +*/ |
static void RM_timer_delay(void *par) |
{ |
PID p = (PID) par; |
RM_level_des *lev; |
lev = (RM_level_des *)level_table[proc_table[p].task_level]; |
proc_table[p].status = RM_READY; |
q_insert(p,&lev->ready); |
proc_table[p].delay_timer = NIL; /* Paranoia */ |
event_need_reschedule(); |
} |
static int RM_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l)) { |
HARD_TASK_MODEL *h = (HARD_TASK_MODEL *)m; |
if (h->wcet && h->mit) |
return 0; |
} |
return -1; |
} |
static int RM_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == JOB_PCLASS || m->pclass == (JOB_PCLASS | l)) |
return 0; |
else |
return -1; |
} |
static char *onoff(int i) |
{ |
if (i) |
return "On "; |
else |
return "Off"; |
} |
static void RM_level_status(LEVEL l) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
PID p = lev->ready; |
kern_printf("Wcet Check : %s\n", |
onoff(lev->flags & RM_ENABLE_WCET_CHECK)); |
kern_printf("On-line guarantee : %s\n", |
onoff(lev->flags & RM_ENABLE_GUARANTEE)); |
kern_printf("Used Bandwidth : %u/%u\n", |
lev->U, MAX_BANDWIDTH); |
while (p != NIL) { |
if ((proc_table[p].pclass) == JOB_PCLASS) |
kern_printf("Pid: %2d (GUEST)\n", p); |
else |
kern_printf("Pid: %2d Name: %10s %s: %9ld Dline: %9ld.%6ld Stat: %s\n", |
p, |
proc_table[p].name, |
lev->flag[p] & RM_FLAG_SPORADIC ? "MinITime" : "Period ", |
lev->period[p], |
proc_table[p].timespec_priority.tv_sec, |
proc_table[p].timespec_priority.tv_nsec/1000, |
RM_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
} |
for (p=0; p<MAX_PROC; p++) |
if (proc_table[p].task_level == l && proc_table[p].status != RM_READY |
&& proc_table[p].status != FREE ) |
kern_printf("Pid: %2d Name: %10s %s: %9ld Dline: %9ld.%6ld Stat: %s\n", |
p, |
proc_table[p].name, |
lev->flag[p] & RM_FLAG_SPORADIC ? "MinITime" : "Period ", |
lev->period[p], |
proc_table[p].timespec_priority.tv_sec, |
proc_table[p].timespec_priority.tv_nsec/1000, |
RM_status_to_a(proc_table[p].status)); |
} |
/* The scheduler only gets the first task in the queue */ |
static PID RM_level_scheduler(LEVEL l) |
static PID RM_public_scheduler(LEVEL l) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* { // print 4 dbg the ready queue |
PID p= lev->ready; |
kern_printf("(s"); |
while (p != NIL) { |
kern_printf("%d ",p); |
p = proc_table[p].next; |
} |
kern_printf(") "); |
} |
*/ |
return (PID)lev->ready; |
return iq_query_first(&lev->ready); |
} |
/* The on-line guarantee is enabled only if the appropriate flag is set... */ |
static int RM_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int RM_public_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
296,16 → 182,19 |
} |
static int RM_task_create(LEVEL l, PID p, TASK_MODEL *m) |
static int RM_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* if the RM_task_create is called, then the pclass must be a |
valid pclass. */ |
HARD_TASK_MODEL *h; |
HARD_TASK_MODEL *h = (HARD_TASK_MODEL *)m; |
if (m->pclass != HARD_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
h = (HARD_TASK_MODEL *)m; |
if (!h->wcet || !h->mit) return -1; |
/* now we know that m is a valid model */ |
proc_table[p].priority = lev->period[p] = h->mit; |
*iq_query_priority(p, &lev->ready) = lev->period[p] = h->mit; |
if (h->periodicity == APERIODIC) |
lev->flag[p] = RM_FLAG_SPORADIC; |
347,7 → 236,7 |
return 0; /* OK, also if the task cannot be guaranteed... */ |
} |
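A task that passes these checks can be set up as below; a minimal sketch, assuming the hard_task_default_model()/hard_task_def_wcet()/hard_task_def_mit() macros from SHARK's model headers (check the exact names in your tree), with sensor_body as a hypothetical task body:

/* sketch: creating a periodic hard task for the RM level */
void create_sensor_task(void)
{
  HARD_TASK_MODEL m;
  PID p;

  hard_task_default_model(m);
  hard_task_def_wcet(m, 2000);    /* wcet: 2000 us, must be nonzero */
  hard_task_def_mit(m, 10000);    /* mit (period): 10000 us, must be nonzero */

  p = task_create("sensor", sensor_body, (TASK_MODEL *)&m, NULL);
  if (p == NIL)
    printk("could not create the sensor task\n");
}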
static void RM_task_detach(LEVEL l, PID p) |
static void RM_public_detach(LEVEL l, PID p) |
{ |
/* the RM level doesn't introduce any dynamically allocated field. |
we only have to reset the NO_GUARANTEE field and decrement the allocated |
361,21 → 250,8 |
lev->U -= (MAX_BANDWIDTH / lev->period[p]) * proc_table[p].wcet; |
} |
static int RM_task_eligible(LEVEL l, PID p) |
static void RM_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void RM_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
// kern_printf("(disp %d)",p); |
383,20 → 259,10 |
/* the task state is set to EXE by the scheduler(); |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
q_extract(p, &lev->ready); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
iq_extract(p, &lev->ready); |
} |
static void RM_task_epilogue(LEVEL l, PID p) |
static void RM_public_epilogue(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
410,14 → 276,15 |
} |
else { |
/* the task has been preempted. it returns to the ready queue... */ |
q_insert(p,&lev->ready); |
iq_priority_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
} |
} |
static void RM_task_activate(LEVEL l, PID p) |
static void RM_public_activate(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
struct timespec *temp; |
if (proc_table[p].status == RM_WAIT) { |
kern_raise(XACTIVATION,p); |
432,35 → 299,33 |
/* see also RM_timer_deadline */ |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
temp = iq_query_timespec(p, &lev->ready); |
kern_gettime(temp); |
ADDUSEC2TIMESPEC(lev->period[p], temp); |
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, |
&proc_table[p].request_time); |
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority); |
/* Insert task in the correct position */ |
proc_table[p].status = RM_READY; |
q_insert(p,&lev->ready); |
iq_priority_insert(p,&lev->ready); |
/* Set the deadline timer */ |
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority, |
lev->deadline_timer[p] = kern_event_post(temp, |
RM_timer_deadline, |
(void *)p); |
} |
static void RM_task_insert(LEVEL l, PID p) |
static void RM_public_unblock(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* Similar to RM_task_activate, but we don't check in what state |
the task is and we don't set the request_time*/ |
/* Similar to RM_task_activate, |
but we don't check in what state the task is */ |
/* Insert task in the correct position */ |
proc_table[p].status = RM_READY; |
q_insert(p,&lev->ready); |
iq_priority_insert(p,&lev->ready); |
} |
static void RM_task_extract(LEVEL l, PID p) |
static void RM_public_block(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extracted it from the ready queue at dispatch time. |
473,7 → 338,7 |
*/ |
} |
static void RM_task_endcycle(LEVEL l, PID p) |
static int RM_public_message(LEVEL l, PID p, void *m) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
487,14 → 352,17 |
if (lev->flags & RM_ENABLE_WCET_CHECK) |
proc_table[p].avail_time = proc_table[p].wcet; |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
/* when the deadline timer fires, it recognizes the situation and sets |
everything correctly (reactivation, request_time, etc...) */ |
everything correctly (reactivation, sleep, etc...) */ |
return 0; |
} |
static void RM_task_end(LEVEL l, PID p) |
static void RM_public_end(LEVEL l, PID p) |
{ |
// RM_level_des *lev = (RM_level_des *)(level_table[l]); |
proc_table[p].status = RM_ZOMBIE; |
/* When the deadline timer fires, it puts the task descriptor in |
501,183 → 369,81 |
the free queue, and frees the allocated bandwidth... */ |
} |
static void RM_task_sleep(LEVEL l, PID p) |
static void RM_private_insert(LEVEL l, PID p, TASK_MODEL *m) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
JOB_TASK_MODEL *job; |
/* the task has terminated its job before consuming its wcet. All OK! */ |
proc_table[p].status = RM_WAIT; |
if (m->pclass != JOB_PCLASS || (m->level != 0 && m->level != l) ) { |
kern_raise(XINVALID_TASK, p); |
return; |
} |
/* we reset the capacity counters... */ |
if (lev->flags & RM_ENABLE_WCET_CHECK) |
proc_table[p].avail_time = proc_table[p].wcet; |
job = (JOB_TASK_MODEL *)m; |
/* when the deadline timer fires, it recognizes the situation and |
correctly sets the task state to sleep... */ |
} |
static void RM_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
struct timespec wakeuptime; |
// RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* equal to RM_task_endcycle */ |
proc_table[p].status = RM_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT, &wakeuptime); |
ADDUSEC2TIMESPEC(usdelay, &wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
RM_timer_delay, |
(void *)p); |
} |
/* Guest Functions |
These functions manage a JOB_TASK_MODEL, which is used to put |
a guest task in the RM ready queue. */ |
static int RM_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
JOB_TASK_MODEL *job = (JOB_TASK_MODEL *)m; |
/* if the RM_guest_create is called, then the pclass must be a |
valid pclass. */ |
*iq_query_timespec(p,&lev->ready) = job->deadline; |
*iq_query_priority(p, &lev->ready) = lev->period[p] = job->period; |
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, &job->deadline); |
lev->deadline_timer[p] = -1; |
/* Insert task in the correct position */ |
iq_priority_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
if (job->noraiseexc) |
lev->flag[p] = RM_FLAG_NORAISEEXC; |
else |
else { |
lev->flag[p] = 0; |
proc_table[p].priority = lev->period[p] = job->period; |
/* there is no bandwidth guarantee at this level, it is performed |
by the level that inserts guest tasks... */ |
return 0; /* OK, also if the task cannot be guaranteed... */ |
lev->deadline_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready), |
RM_timer_guest_deadline, |
(void *)p); |
} |
} |
static void RM_guest_detach(LEVEL l, PID p) |
static void RM_private_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the RM level doesn't introduce any dynamically allocated field. |
No guarantee is performed on guest tasks... so we don't have to reset |
the NO_GUARANTEE field */ |
} |
static void RM_guest_dispatch(LEVEL l, PID p, int nostop) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* the task state is set to EXE by the scheduler() |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
q_extract(p, &lev->ready); |
iq_extract(p, &lev->ready); |
} |
static void RM_guest_epilogue(LEVEL l, PID p) |
static void RM_private_epilogue(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* the task has been preempted. it returns to the ready queue... */ |
q_insert(p,&lev->ready); |
iq_priority_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
} |
static void RM_guest_activate(LEVEL l, PID p) |
static void RM_private_extract(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* Insert task in the correct position */ |
q_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
/* Set the deadline timer */ |
if (!(lev->flag[p] & RM_FLAG_NORAISEEXC)) |
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority, |
RM_timer_guest_deadline, |
(void *)p); |
} |
static void RM_guest_insert(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* Insert task in the correct position */ |
q_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
} |
static void RM_guest_extract(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extracted it from the ready queue at dispatch time. |
. the state of the task is set by the calling function |
. the deadline must remain... |
So, we do nothing!!! |
*/ |
} |
static void RM_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RM_guest_end(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
//kern_printf("RM_guest_end: dline timer %d\n",lev->deadline_timer[p]); |
if (proc_table[p].status == RM_READY) |
{ |
q_extract(p, &lev->ready); |
iq_extract(p, &lev->ready); |
//kern_printf("(g_end rdy extr)"); |
} |
else if (proc_table[p].status == RM_DELAY) { |
event_delete(proc_table[p].delay_timer); |
proc_table[p].delay_timer = NIL; /* paranoia */ |
} |
/* we remove the deadline timer, because the slice is finished */ |
if (lev->deadline_timer[p] != NIL) { |
// kern_printf("RM_guest_end: dline timer %d\n",lev->deadline_timer[p]); |
event_delete(lev->deadline_timer[p]); |
kern_event_delete(lev->deadline_timer[p]); |
lev->deadline_timer[p] = NIL; |
} |
} |
static void RM_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RM_guest_delay(LEVEL l, PID p, TIME usdelay) |
{ |
struct timespec wakeuptime; |
// RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* equal to RM_task_endcycle */ |
proc_table[p].status = RM_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT, &wakeuptime); |
ADDUSEC2TIMESPEC(usdelay, &wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
RM_timer_delay, |
(void *)p); |
} |
/* Registration functions */ |
/*+ Registration function: |
int flags the init flags ... see rm.h +*/ |
void RM_register_level(int flags) |
LEVEL RM_register_level(int flags) |
{ |
LEVEL l; /* the level that we register */ |
RM_level_des *lev; /* for readability only */ |
686,56 → 452,34 |
printk("RM_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(RM_level_des)); |
/* alloc the space needed for the RM_level_des */ |
lev = (RM_level_des *)kern_alloc(sizeof(RM_level_des)); |
lev = (RM_level_des *)level_table[l]; |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, RM_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = RM_LEVEL_CODE; |
lev->l.level_version = RM_LEVEL_VERSION; |
lev->l.private_insert = RM_private_insert; |
lev->l.private_extract = RM_private_extract; |
lev->l.private_dispatch = RM_private_dispatch; |
lev->l.private_epilogue = RM_private_epilogue; |
lev->l.level_accept_task_model = RM_level_accept_task_model; |
lev->l.level_accept_guest_model = RM_level_accept_guest_model; |
lev->l.level_status = RM_level_status; |
lev->l.level_scheduler = RM_level_scheduler; |
lev->l.public_scheduler = RM_public_scheduler; |
if (flags & RM_ENABLE_GUARANTEE) |
lev->l.level_guarantee = RM_level_guarantee; |
lev->l.public_guarantee = RM_public_guarantee; |
else |
lev->l.level_guarantee = NULL; |
lev->l.public_guarantee = NULL; |
lev->l.task_create = RM_task_create; |
lev->l.task_detach = RM_task_detach; |
lev->l.task_eligible = RM_task_eligible; |
lev->l.task_dispatch = RM_task_dispatch; |
lev->l.task_epilogue = RM_task_epilogue; |
lev->l.task_activate = RM_task_activate; |
lev->l.task_insert = RM_task_insert; |
lev->l.task_extract = RM_task_extract; |
lev->l.task_endcycle = RM_task_endcycle; |
lev->l.task_end = RM_task_end; |
lev->l.task_sleep = RM_task_sleep; |
lev->l.task_delay = RM_task_delay; |
lev->l.public_create = RM_public_create; |
lev->l.public_detach = RM_public_detach; |
lev->l.public_end = RM_public_end; |
lev->l.public_dispatch = RM_public_dispatch; |
lev->l.public_epilogue = RM_public_epilogue; |
lev->l.public_activate = RM_public_activate; |
lev->l.public_unblock = RM_public_unblock; |
lev->l.public_block = RM_public_block; |
lev->l.public_message = RM_public_message; |
lev->l.guest_create = RM_guest_create; |
lev->l.guest_detach = RM_guest_detach; |
lev->l.guest_dispatch = RM_guest_dispatch; |
lev->l.guest_epilogue = RM_guest_epilogue; |
lev->l.guest_activate = RM_guest_activate; |
lev->l.guest_insert = RM_guest_insert; |
lev->l.guest_extract = RM_guest_extract; |
lev->l.guest_endcycle = RM_guest_endcycle; |
lev->l.guest_end = RM_guest_end; |
lev->l.guest_sleep = RM_guest_sleep; |
lev->l.guest_delay = RM_guest_delay; |
/* fill the RM descriptor part */ |
for(i=0; i<MAX_PROC; i++) { |
lev->period[i] = 0; |
743,18 → 487,17 |
lev->flag[i] = 0; |
} |
lev->ready = NIL; |
iq_init(&lev->ready, &freedesc, 0); |
lev->flags = flags & 0x07; |
lev->U = 0; |
return l; |
} |
bandwidth_t RM_usedbandwidth(LEVEL l) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
if (lev->l.level_code == RM_LEVEL_CODE && |
lev->l.level_version == RM_LEVEL_VERSION) |
return lev->U; |
else |
return 0; |
return lev->U; |
} |
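Since RM_register_level() now returns the allocated LEVEL, dependent modules can name their master level explicitly at init time. A minimal sketch (my_register_levels is a hypothetical init hook; the RM_ENABLE_ALL and PS_ENABLE_ALL flag names are assumed from rm.h/ps.h):

/* sketch: typical registration order at system startup */
void my_register_levels(void)
{
  LEVEL rm;

  /* the registration now hands back the allocated LEVEL... */
  rm = RM_register_level(RM_ENABLE_ALL);

  /* ...so an aperiodic server can name its master level explicitly */
  PS_register_level(PS_ENABLE_ALL, rm, 2000 /* Cs */, 10000 /* period */);
}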
/shark/tags/rel_0_4/kernel/modules/rrsoft.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: rrsoft.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: rrsoft.c,v 1.4 2003-01-07 17:07:51 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:51 $ |
------------ |
This file contains the scheduling module RRSOFT (Round Robin) |
60,10 → 60,10 |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
/*+ Status used in the level +*/ |
#define RRSOFT_READY MODULE_STATUS_BASE |
#define RRSOFT_DELAY MODULE_STATUS_BASE+1 |
#define RRSOFT_IDLE MODULE_STATUS_BASE+2 |
/*+ the level redefinition for the Round Robin level +*/ |
72,7 → 72,7 |
int nact[MAX_PROC]; /*+ number of pending activations +*/ |
QQUEUE ready; /*+ the ready queue +*/ |
IQUEUE ready; /*+ the ready queue +*/ |
int slice; /*+ the level's time slice +*/ |
93,20 → 93,6 |
} RRSOFT_level_des; |
static char *RRSOFT_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case RRSOFT_READY: return "RRSOFT_Ready"; |
case RRSOFT_DELAY: return "RRSOFT_Delay"; |
case RRSOFT_IDLE : return "RRSOFT_Idle"; |
default : return "RRSOFT_Unknown"; |
} |
} |
/* this is the periodic reactivation of the task... it is posted only |
if the task is a periodic task */ |
static void RRSOFT_timer_reactivate(void *par) |
121,7 → 107,7 |
/* the task has finished the current activation and must be |
reactivated */ |
proc_table[p].status = RRSOFT_READY; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
event_need_reschedule(); |
} |
139,72 → 125,11 |
// trc_logevent(TRC_INTACTIVATION,&p); |
} |
/*+ this function is called when a task finishes its delay +*/ |
static void RRSOFT_timer_delay(void *par) |
{ |
PID p = (PID) par; |
RRSOFT_level_des *lev; |
lev = (RRSOFT_level_des *)level_table[proc_table[p].task_level]; |
proc_table[p].status = RRSOFT_READY; |
qq_insertlast(p,&lev->ready); |
proc_table[p].delay_timer = NIL; /* Paranoia */ |
// kern_printf(" DELAY TIMER %d ", p); |
event_need_reschedule(); |
} |
static int RRSOFT_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
if ((m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l)) && lev->models & RRSOFT_ONLY_NRT) |
return 0; |
else if ((m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l)) && lev->models & RRSOFT_ONLY_SOFT) |
return 0; |
else if ((m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l)) && lev->models & RRSOFT_ONLY_HARD) |
return 0; |
else |
return -1; |
} |
static int RRSOFT_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
return -1; |
} |
static void RRSOFT_level_status(LEVEL l) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
PID p = qq_queryfirst(&lev->ready); |
kern_printf("Slice: %d \n", lev->slice); |
while (p != NIL) { |
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name, |
RRSOFT_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
} |
for (p=0; p<MAX_PROC; p++) |
if (proc_table[p].task_level == l && proc_table[p].status != RRSOFT_READY |
&& proc_table[p].status != FREE ) |
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name, |
RRSOFT_status_to_a(proc_table[p].status)); |
} |
/* This is not efficient, but it is very fair :-) |
All this is needed because if a task executes for a long time |
due to (shadow!) priority inheritance, it must go to the |
tail of the queue many times... */ |
static PID RRSOFT_level_scheduler(LEVEL l) |
static PID RRSOFT_public_scheduler(LEVEL l) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
211,7 → 136,7 |
PID p; |
for (;;) { |
p = qq_queryfirst(&lev->ready); |
p = iq_query_first(&lev->ready); |
if (p == -1) |
return p; |
//{kern_printf("(s%d)",p); return p;} |
219,8 → 144,8 |
// kern_printf("(p=%d l=%d avail=%d wcet =%d)\n",p,l,proc_table[p].avail_time, proc_table[p].wcet); |
if (proc_table[p].avail_time <= 0) { |
proc_table[p].avail_time += proc_table[p].wcet; |
qq_extract(p,&lev->ready); |
qq_insertlast(p,&lev->ready); |
iq_extract(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
else |
//{kern_printf("(s%d)",p); return p;} |
229,17 → 154,8 |
} |
} |
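The loop above is the whole fairness policy: whenever the head of the queue has exhausted its slice, its budget is topped up and it rotates to the tail, so a task kept running by shadow inheritance pays back the extra time. A standalone sketch of the same policy on a plain array queue:

/* standalone sketch of the slice-exhaustion requeue policy */
#include <stdio.h>

#define NTASKS 3
#define SLICE  4

static int queue[NTASKS] = { 0, 1, 2 };     /* FIFO of task ids, head first */
static int qlen = NTASKS;
static int avail[NTASKS] = { -1, 0, 3 };    /* leftover budget per task */

static int pick_next(void)
{
  for (;;) {
    if (qlen == 0)
      return -1;                            /* nothing ready */
    int p = queue[0];
    if (avail[p] > 0)
      return p;                             /* head still has budget: run it */
    avail[p] += SLICE;                      /* exhausted: replenish... */
    for (int i = 1; i < qlen; i++)          /* ...and rotate to the tail */
      queue[i - 1] = queue[i];
    queue[qlen - 1] = p;
  }
}

int main(void)
{
  printf("dispatch task %d\n", pick_next());  /* prints 2: tasks 0 and 1 rotate */
  return 0;
}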
static int RRSOFT_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int RRSOFT_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
/* the RRSOFT level always guarantees... the function is defined because |
there can be an aperiodic server at a lower-priority level than |
the RRSOFT that needs a guarantee (e.g., a TBS server) */ |
return 1; |
} |
static int RRSOFT_task_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
// kern_printf("create %d mod %d\n",p,m->pclass); |
247,6 → 163,11 |
the only things left to set are the capacity fields, which take |
the values passed in the model... */ |
if ( !( (m->pclass==NRT_PCLASS  && lev->models & RRSOFT_ONLY_NRT ) || |
        (m->pclass==SOFT_PCLASS && lev->models & RRSOFT_ONLY_SOFT) || |
        (m->pclass==HARD_PCLASS && lev->models & RRSOFT_ONLY_HARD) ) ) return -1; |
if (m->level != 0 && m->level != l) return -1; |
/* I used the wcet field because using wcet can account if a task |
consume more than the timeslice... */ |
310,49 → 231,17 |
return 0; /* OK */ |
} |
static void RRSOFT_task_detach(LEVEL l, PID p) |
static void RRSOFT_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the RRSOFT level doesn't introduce any new field in the TASK_MODEL, |
so all detach work is done by the task_create. |
The task state is set to FREE by the general task_create */ |
} |
static int RRSOFT_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void RRSOFT_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
//static int p2count=0; |
/* the task state is set to EXE by the scheduler(); |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
qq_extract(p, &lev->ready); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds],&schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
iq_extract(p, &lev->ready); |
} |
static void RRSOFT_task_epilogue(LEVEL l, PID p) |
static void RRSOFT_public_epilogue(LEVEL l, PID p) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
360,16 → 249,16 |
qqueue position */ |
if (proc_table[p].avail_time <= 0) { |
proc_table[p].avail_time += proc_table[p].wcet; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
else |
/* curr is >0, so the running task has to run for another curr usec */ |
qq_insertfirst(p,&lev->ready); |
iq_insertfirst(p,&lev->ready); |
proc_table[p].status = RRSOFT_READY; |
} |
static void RRSOFT_task_activate(LEVEL l, PID p) |
static void RRSOFT_public_activate(LEVEL l, PID p) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
381,18 → 270,15 |
return; |
} |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
/* Insert task in the coRRSOFTect position */ |
/* Insert task in the correct position */ |
proc_table[p].status = RRSOFT_READY; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
/* Set the reactivation timer */ |
if (lev->periodic[p]) |
{ |
TIMESPEC_ASSIGN(&lev->reactivation_time[p], &proc_table[p].request_time); |
kern_gettime(&lev->reactivation_time[p]); |
ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]); |
// TIMESPEC_ASSIGN(&lev->reactivation_time[p], &lev->cbs_dline[p]); |
lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p], |
RRSOFT_timer_reactivate, |
(void *)p); |
399,19 → 285,19 |
} |
} |
static void RRSOFT_task_insert(LEVEL l, PID p) |
static void RRSOFT_public_unblock(LEVEL l, PID p) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
/* Similar to RRSOFT_task_activate, but we don't check in what state |
the task is and we don't set the request_time */ |
the task is */ |
/* Insert task in the correct position */ |
proc_table[p].status = RRSOFT_READY; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
static void RRSOFT_task_extract(LEVEL l, PID p) |
static void RRSOFT_public_block(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extracted it from the ready queue at dispatch time. |
423,23 → 309,27 |
*/ |
} |
static void RRSOFT_task_endcycle(LEVEL l, PID p) |
static int RRSOFT_public_message(LEVEL l, PID p, void *m) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
if (lev->nact[p] > 0) { |
/* continue!!!! */ |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
lev->nact[p]--; |
// qq_insertlast(p,&lev->ready); |
qq_insertfirst(p,&lev->ready); |
iq_insertfirst(p,&lev->ready); |
proc_table[p].status = RRSOFT_READY; |
} |
else |
proc_table[p].status = RRSOFT_IDLE; |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
return 0; |
} |
static void RRSOFT_task_end(LEVEL l, PID p) |
static void RRSOFT_public_end(LEVEL l, PID p) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
447,83 → 337,15 |
/* we delete the reactivation timer */ |
if (lev->periodic[p]) { |
event_delete(lev->reactivation_timer[p]); |
kern_event_delete(lev->reactivation_timer[p]); |
lev->reactivation_timer[p] = -1; |
} |
/* then, we insert the task in the free queue */ |
proc_table[p].status = FREE; |
q_insert(p,&freedesc); |
iq_insertlast(p,&freedesc); |
} |
static void RRSOFT_task_sleep(LEVEL l, PID p) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
if (lev->nact[p] >= 0) lev->nact[p] = 0; |
/* we delete the reactivation timer */ |
if (lev->periodic[p]) { |
event_delete(lev->reactivation_timer[p]); |
lev->reactivation_timer[p] = -1; |
} |
proc_table[p].status = SLEEP; |
} |
static void RRSOFT_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
// RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
struct timespec wakeuptime; |
/* equal to RRSOFT_task_endcycle */ |
proc_table[p].status = RRSOFT_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT,&wakeuptime); |
ADDUSEC2TIMESPEC(usdelay,&wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
RRSOFT_timer_delay, |
(void *)p); |
} |
static int RRSOFT_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void RRSOFT_guest_detach(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_epilogue(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_activate(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_insert(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_extract(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_end(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
/* Registration functions */ |
/*+ This init function installs the "main" task +*/ |
551,7 → 373,7 |
if (p == NIL) |
printk("\nPanic!!! can't create main task...\n"); |
RRSOFT_task_activate(lev,p); |
RRSOFT_public_activate(lev,p); |
} |
559,7 → 381,7 |
TIME slice the slice for the Round Robin queue |
int createmain 1 if the level creates the main task 0 otherwise |
struct multiboot_info *mb used if createmain specified +*/ |
void RRSOFT_register_level(TIME slice, |
LEVEL RRSOFT_register_level(TIME slice, |
int createmain, |
struct multiboot_info *mb, |
BYTE models) |
571,52 → 393,23 |
printk("RRSOFT_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(RRSOFT_level_des)); |
/* alloc the space needed for the RRSOFT_level_des */ |
lev = (RRSOFT_level_des *)kern_alloc(sizeof(RRSOFT_level_des)); |
lev = (RRSOFT_level_des *)level_table[l]; |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, RRSOFT_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = RRSOFT_LEVEL_CODE; |
lev->l.level_version = RRSOFT_LEVEL_VERSION; |
lev->l.public_scheduler = RRSOFT_public_scheduler; |
lev->l.public_create = RRSOFT_public_create; |
lev->l.public_end = RRSOFT_public_end; |
lev->l.public_dispatch = RRSOFT_public_dispatch; |
lev->l.public_epilogue = RRSOFT_public_epilogue; |
lev->l.public_activate = RRSOFT_public_activate; |
lev->l.public_unblock = RRSOFT_public_unblock; |
lev->l.public_block = RRSOFT_public_block; |
lev->l.public_message = RRSOFT_public_message; |
lev->l.level_accept_task_model = RRSOFT_level_accept_task_model; |
lev->l.level_accept_guest_model = RRSOFT_level_accept_guest_model; |
lev->l.level_status = RRSOFT_level_status; |
lev->l.level_scheduler = RRSOFT_level_scheduler; |
lev->l.level_guarantee = RRSOFT_level_guarantee; |
lev->l.task_create = RRSOFT_task_create; |
lev->l.task_detach = RRSOFT_task_detach; |
lev->l.task_eligible = RRSOFT_task_eligible; |
lev->l.task_dispatch = RRSOFT_task_dispatch; |
lev->l.task_epilogue = RRSOFT_task_epilogue; |
lev->l.task_activate = RRSOFT_task_activate; |
lev->l.task_insert = RRSOFT_task_insert; |
lev->l.task_extract = RRSOFT_task_extract; |
lev->l.task_endcycle = RRSOFT_task_endcycle; |
lev->l.task_end = RRSOFT_task_end; |
lev->l.task_sleep = RRSOFT_task_sleep; |
lev->l.task_delay = RRSOFT_task_delay; |
lev->l.guest_create = RRSOFT_guest_create; |
lev->l.guest_detach = RRSOFT_guest_detach; |
lev->l.guest_dispatch = RRSOFT_guest_dispatch; |
lev->l.guest_epilogue = RRSOFT_guest_epilogue; |
lev->l.guest_activate = RRSOFT_guest_activate; |
lev->l.guest_insert = RRSOFT_guest_insert; |
lev->l.guest_extract = RRSOFT_guest_extract; |
lev->l.guest_endcycle = RRSOFT_guest_endcycle; |
lev->l.guest_end = RRSOFT_guest_end; |
lev->l.guest_sleep = RRSOFT_guest_sleep; |
lev->l.guest_delay = RRSOFT_guest_delay; |
/* fill the RRSOFT descriptor part */ |
for (i = 0; i < MAX_PROC; i++) { |
lev->nact[i] = -1; |
626,7 → 419,7 |
lev->period[i] = 0; |
} |
qq_init(&lev->ready); |
iq_init(&lev->ready, &freedesc, 0); |
if (slice < RRSOFT_MINIMUM_SLICE) slice = RRSOFT_MINIMUM_SLICE; |
if (slice > RRSOFT_MAXIMUM_SLICE) slice = RRSOFT_MAXIMUM_SLICE; |
638,6 → 431,8 |
if (createmain) |
sys_atrunlevel(RRSOFT_call_main,(void *) l, RUNLEVEL_INIT); |
return l; |
} |
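Registration usage, for reference: a sketch assuming a hypothetical init hook; the 25-ms slice is just an example value, and RRSOFT_ONLY_NRT restricts the level to NRT task models:

/* sketch: a soft round-robin level that accepts only NRT tasks and */
/* creates the main task itself (my_init is a hypothetical hook)    */
LEVEL rrsoft_level;

void my_init(struct multiboot_info *mb)
{
  rrsoft_level = RRSOFT_register_level(25000 /* slice, usec */,
                                       1     /* createmain */,
                                       mb, RRSOFT_ONLY_NRT);
}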
/shark/tags/rel_0_4/kernel/modules/ps.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: ps.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: ps.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the aperiodic server PS (Polling Server) |
103,6 → 103,7 |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
/*+ Status used in the level +*/ |
#define PS_WAIT APER_STATUS_BASE /*+ waiting the service +*/ |
122,7 → 123,7 |
int Cs; /*+ server capacity +*/ |
int availCs; /*+ server avail time +*/ |
QQUEUE wait; /*+ the wait queue of the PS +*/ |
IQUEUE wait; /*+ the wait queue of the PS +*/ |
PID activated; /*+ the task inserted in another queue +*/ |
int flags; /*+ the init flags... +*/ |
145,8 → 146,7 |
m = lev->scheduling_level; |
job_task_default_model(j,lev->lastdline); |
job_task_def_period(j,lev->period); |
level_table[m]->guest_create(m,p,(TASK_MODEL *)&j); |
level_table[m]->guest_activate(m,p); |
level_table[m]->private_insert(m,p,(TASK_MODEL *)&j); |
// kern_printf("(%d %d)",lev->lastdline.tv_sec,lev->lastdline.tv_nsec); |
} |
167,8 → 167,8 |
was not any other task to be put in the ready queue |
... we are now activating the next task */ |
if (lev->availCs > 0 && lev->activated == NIL) { |
if (qq_queryfirst(&lev->wait) != NIL) { |
lev->activated = qq_getfirst(&lev->wait); |
if (iq_query_first(&lev->wait) != NIL) { |
lev->activated = iq_getfirst(&lev->wait); |
PS_activation(lev); |
event_need_reschedule(); |
} |
180,80 → 180,8 |
// kern_printf("!"); |
} |
static char *PS_status_to_a(WORD status) |
static PID PS_public_schedulerbackground(LEVEL l) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case PS_WAIT : return "PS_Wait"; |
default : return "PS_Unknown"; |
} |
} |
static int PS_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l) ) { |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; |
if (s->periodicity == APERIODIC) |
return 0; |
} |
return -1; |
} |
static int PS_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
return -1; |
} |
static char *onoff(int i) |
{ |
if (i) |
return "On "; |
else |
return "Off"; |
} |
static void PS_level_status(LEVEL l) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
PID p = qq_queryfirst(&lev->wait); |
kern_printf("On-line guarantee : %s\n", |
onoff(lev->flags & PS_ENABLE_GUARANTEE_EDF || |
lev->flags & PS_ENABLE_GUARANTEE_RM )); |
kern_printf("Used Bandwidth : %u/%u\n", |
lev->U, MAX_BANDWIDTH); |
if (lev->activated != -1) |
kern_printf("Activated: Pid: %2d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n", |
lev->activated, |
proc_table[lev->activated].name, |
proc_table[lev->activated].timespec_priority.tv_sec, |
proc_table[lev->activated].timespec_priority.tv_nsec, |
lev->nact[lev->activated], |
PS_status_to_a(proc_table[lev->activated].status)); |
while (p != NIL) { |
kern_printf("Pid: %2d Name: %10s Stat: %s\n", |
p, |
proc_table[p].name, |
PS_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
} |
} |
static PID PS_level_scheduler(LEVEL l) |
{ |
/* the PS doesn't schedule anything... |
an EDF level or similar does it! */ |
return NIL; |
} |
static PID PS_level_schedulerbackground(LEVEL l) |
{ |
/* the PS catches background time to execute aperiodic activities */ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
262,11 → 190,11 |
if (lev->flags & PS_BACKGROUND_BLOCK) |
return NIL; |
else |
return qq_queryfirst(&lev->wait); |
return iq_query_first(&lev->wait); |
} |
/* The on-line guarantee is enabled only if the appropriate flag is set... */ |
static int PS_level_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth) |
static int PS_public_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
278,7 → 206,7 |
return 0; |
} |
static int PS_level_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth) |
static int PS_public_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
290,13 → 218,17 |
return 0; |
} |
static int PS_task_create(LEVEL l, PID p, TASK_MODEL *m) |
static int PS_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
SOFT_TASK_MODEL *s; |
/* if the PS_task_create is called, then the pclass must be a |
valid pclass. */ |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; |
if (m->pclass != SOFT_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
s = (SOFT_TASK_MODEL *)m; |
if (s->periodicity != APERIODIC) return -1; |
s = (SOFT_TASK_MODEL *)m; |
if (s->arrivals == SAVE_ARRIVALS) |
lev->nact[p] = 0; |
306,26 → 238,8 |
return 0; /* OK, also if the task cannot be guaranteed... */ |
} |
static void PS_task_detach(LEVEL l, PID p) |
static void PS_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the PS level doesn't introduce any dynamically allocated field. */ |
} |
static int PS_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void PS_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
struct timespec ty; |
335,13 → 249,13 |
to exe before calling task_dispatch. we have to check |
lev->activated != p instead */ |
if (lev->activated != p) { |
qq_extract(p, &lev->wait); |
iq_extract(p, &lev->wait); |
//kern_printf("#%d#",p); |
} |
else { |
//if (nostop) kern_printf("(gd status=%d)",proc_table[p].status); |
level_table[ lev->scheduling_level ]-> |
guest_dispatch(lev->scheduling_level,p,nostop); |
private_dispatch(lev->scheduling_level,p,nostop); |
} |
/* set the capacity timer */ |
352,19 → 266,9 |
} |
// kern_printf("(disp %d %d)",ty.tv_sec, ty.tv_nsec); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
} |
static void PS_task_epilogue(LEVEL l, PID p) |
static void PS_public_epilogue(LEVEL l, PID p) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
struct timespec ty; |
396,8 → 300,8 |
task point the shadow to it!!!*/ |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
qq_insertfirst(p, &lev->wait); |
private_extract(lev->scheduling_level,p); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = PS_WAIT; |
lev->activated = NIL; |
} |
406,14 → 310,14 |
wait queue by calling the guest_epilogue... */ |
if (lev->activated == p) {//kern_printf("Û1"); |
level_table[ lev->scheduling_level ]-> |
guest_epilogue(lev->scheduling_level,p); |
private_epilogue(lev->scheduling_level,p); |
} else { //kern_printf("Û2"); |
qq_insertfirst(p, &lev->wait); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = PS_WAIT; |
} |
} |
static void PS_task_activate(LEVEL l, PID p) |
static void PS_public_activate(LEVEL l, PID p) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
422,7 → 326,6 |
lev->nact[p]++; |
} |
else if (proc_table[p].status == SLEEP) { |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
if (lev->activated == NIL && lev->availCs > 0) { |
lev->activated = p; |
429,7 → 332,7 |
PS_activation(lev); |
} |
else { |
qq_insertlast(p, &lev->wait); |
iq_insertlast(p, &lev->wait); |
proc_table[p].status = PS_WAIT; |
} |
} |
439,7 → 342,7 |
} |
static void PS_task_insert(LEVEL l, PID p) |
static void PS_public_unblock(LEVEL l, PID p) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
449,11 → 352,11 |
/* when we reinsert the task into the system, the server capacity |
is always 0 because nobody executes with the PS before... */ |
qq_insertfirst(p, &lev->wait); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = PS_WAIT; |
} |
static void PS_task_extract(LEVEL l, PID p) |
static void PS_public_block(LEVEL l, PID p) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
464,10 → 367,10 |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
} |
static void PS_task_endcycle(LEVEL l, PID p) |
static int PS_public_message(LEVEL l, PID p, void *m) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
struct timespec ty; |
484,56 → 387,32 |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
else |
qq_extract(p, &lev->wait); |
iq_extract(p, &lev->wait); |
if (lev->nact[p] > 0) |
{ |
lev->nact[p]--; |
qq_insertlast(p, &lev->wait); |
iq_insertlast(p, &lev->wait); |
proc_table[p].status = PS_WAIT; |
} |
else |
proc_table[p].status = SLEEP; |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated == NIL) |
lev->availCs = 0; /* see note (*) at the begin of the file */ |
else |
PS_activation(lev); |
} |
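The budget handling around this point follows the classic polling-server rule: the serviced task's wall-clock execution is charged to availCs, the budget is discarded as soon as the wait queue empties (note (*)), and it is restored at each period. A standalone sketch of that policy (values are illustrative, not the kernel's code):

/* standalone sketch of polling-server budget accounting */
#include <stdio.h>

static int availCs;        /* remaining server budget, microseconds */
static int Cs = 2000;      /* server capacity per period */

static void account(int executed_us, int queue_empty)
{
  availCs -= executed_us;          /* charge the served task's runtime */
  if (queue_empty)
    availCs = 0;                   /* note (*): an idle server keeps no budget */
}

static void replenish(void)
{
  availCs = Cs;                    /* full budget at each period boundary */
}

int main(void)
{
  replenish();
  account(1500, 0);                              /* a request consumed 1.5 ms */
  printf("budget left: %d us\n", availCs);       /* 500 */
  account(300, 1);                               /* queue drained: budget discarded */
  printf("budget left: %d us\n", availCs);       /* 0 */
  return 0;
}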
static void PS_task_end(LEVEL l, PID p) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
struct timespec ty; |
TIME tx; |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
/* update the server capacity */ |
if (lev->flags & PS_BACKGROUND) |
lev->flags &= ~PS_BACKGROUND; |
else { |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
lev->availCs -= tx; |
} |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
proc_table[p].status = FREE; |
q_insertfirst(p,&freedesc); |
lev->activated = qq_getfirst(&lev->wait); |
if (lev->activated == NIL) |
lev->availCs = 0; /* see note (*) at the begin of the file */ |
else |
PS_activation(lev); |
return 0; |
} |
static void PS_task_sleep(LEVEL l, PID p) |
static void PS_public_end(LEVEL l, PID p) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
struct timespec ty; |
548,80 → 427,20 |
lev->availCs -= tx; |
} |
if (lev->nact[p] >= 0) lev->nact[p] = 0; |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
else |
qq_extract(p, &lev->wait); |
private_extract(lev->scheduling_level,p); |
proc_table[p].status = SLEEP; |
proc_table[p].status = FREE; |
iq_insertfirst(p,&freedesc); |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated == NIL) |
lev->availCs = 0; /* see note (*) at the begin of the file */ |
else |
PS_activation(lev); |
} |
static void PS_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
struct timespec ty; |
TIME tx; |
/* update the server capacity */ |
if (lev->flags & PS_BACKGROUND) |
lev->flags &= ~PS_BACKGROUND; |
else { |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
lev->availCs -= tx; |
} |
/* I hope no delay when owning a mutex... */ |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_delay(lev->scheduling_level,p,usdelay); |
} |
static int PS_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void PS_guest_detach(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void PS_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void PS_guest_epilogue(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void PS_guest_activate(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void PS_guest_insert(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void PS_guest_extract(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void PS_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void PS_guest_end(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void PS_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void PS_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
/* Registration functions */ |
631,7 → 450,7 |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[(LEVEL)l]); |
ll_gettime(TIME_EXACT,&lev->lastdline); |
kern_gettime(&lev->lastdline); |
ADDUSEC2TIMESPEC(lev->period, &lev->lastdline); |
kern_event_post(&lev->lastdline, PS_deadline_timer, l); |
641,7 → 460,7 |
/*+ Registration function: |
int flags the init flags ... see PS.h +*/ |
void PS_register_level(int flags, LEVEL master, int Cs, int per) |
LEVEL PS_register_level(int flags, LEVEL master, int Cs, int per) |
{ |
LEVEL l; /* the level that we register */ |
PS_level_des *lev; /* for readability only */ |
650,64 → 469,33 |
printk("PS_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(PS_level_des)); |
printk(" alloco descrittore %d %d\n",l,(int)sizeof(PS_level_des)); |
lev = (PS_level_des *)level_table[l]; |
/* alloc the space needed for the PS_level_des */ |
lev = (PS_level_des *)kern_alloc(sizeof(PS_level_des)); |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, PS_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = PS_LEVEL_CODE; |
lev->l.level_version = PS_LEVEL_VERSION; |
lev->l.level_accept_task_model = PS_level_accept_task_model; |
lev->l.level_accept_guest_model = PS_level_accept_guest_model; |
lev->l.level_status = PS_level_status; |
if (flags & PS_ENABLE_BACKGROUND) |
lev->l.level_scheduler = PS_level_schedulerbackground; |
else |
lev->l.level_scheduler = PS_level_scheduler; |
lev->l.public_scheduler = PS_public_schedulerbackground; |
if (flags & PS_ENABLE_GUARANTEE_EDF) |
lev->l.level_guarantee = PS_level_guaranteeEDF; |
lev->l.public_guarantee = PS_public_guaranteeEDF; |
else if (flags & PS_ENABLE_GUARANTEE_RM) |
lev->l.level_guarantee = PS_level_guaranteeRM; |
lev->l.public_guarantee = PS_public_guaranteeRM; |
else |
lev->l.level_guarantee = NULL; |
lev->l.public_guarantee = NULL; |
lev->l.task_create = PS_task_create; |
lev->l.task_detach = PS_task_detach; |
lev->l.task_eligible = PS_task_eligible; |
lev->l.task_dispatch = PS_task_dispatch; |
lev->l.task_epilogue = PS_task_epilogue; |
lev->l.task_activate = PS_task_activate; |
lev->l.task_insert = PS_task_insert; |
lev->l.task_extract = PS_task_extract; |
lev->l.task_endcycle = PS_task_endcycle; |
lev->l.task_end = PS_task_end; |
lev->l.task_sleep = PS_task_sleep; |
lev->l.task_delay = PS_task_delay; |
lev->l.public_create = PS_public_create; |
lev->l.public_end = PS_public_end; |
lev->l.public_dispatch = PS_public_dispatch; |
lev->l.public_epilogue = PS_public_epilogue; |
lev->l.public_activate = PS_public_activate; |
lev->l.public_unblock = PS_public_unblock; |
lev->l.public_block = PS_public_block; |
lev->l.public_message = PS_public_message; |
lev->l.guest_create = PS_guest_create; |
lev->l.guest_detach = PS_guest_detach; |
lev->l.guest_dispatch = PS_guest_dispatch; |
lev->l.guest_epilogue = PS_guest_epilogue; |
lev->l.guest_activate = PS_guest_activate; |
lev->l.guest_insert = PS_guest_insert; |
lev->l.guest_extract = PS_guest_extract; |
lev->l.guest_endcycle = PS_guest_endcycle; |
lev->l.guest_end = PS_guest_end; |
lev->l.guest_sleep = PS_guest_sleep; |
lev->l.guest_delay = PS_guest_delay; |
/* fill the PS descriptor part */ |
for (i=0; i<MAX_PROC; i++) |
718,7 → 506,7 |
lev->period = per; |
qq_init(&lev->wait); |
iq_init(&lev->wait, &freedesc, 0); |
lev->activated = NIL; |
lev->U = (MAX_BANDWIDTH / per) * Cs; |
728,15 → 516,14 |
lev->flags = flags & 0x07; |
sys_atrunlevel(PS_dline_install,(void *) l, RUNLEVEL_INIT); |
return l; |
} |
bandwidth_t PS_usedbandwidth(LEVEL l) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
if (lev->l.level_code == PS_LEVEL_CODE && |
lev->l.level_version == PS_LEVEL_VERSION) |
return lev->U; |
else |
return 0; |
return lev->U; |
} |
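A soft aperiodic task to be served by the PS can be declared as below; a sketch assuming the soft_task_* macros from SHARK's model headers (check the exact names in your tree), with handler_body and create_handler hypothetical:

/* sketch: an aperiodic soft task served by the PS */
void create_handler(LEVEL ps_level)
{
  SOFT_TASK_MODEL m;
  PID p;

  soft_task_default_model(m);
  soft_task_def_level(m, ps_level);    /* bind the task to the PS level */
  soft_task_def_aperiodic(m);          /* PS_public_create rejects periodic models */
  soft_task_def_arrivals_save(m);      /* pending activations counted in nact[] */

  p = task_create("handler", handler_body, (TASK_MODEL *)&m, NULL);
  if (p == NIL)
    printk("could not create the handler task\n");
}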
/shark/tags/rel_0_4/kernel/modules/rr.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: rr.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: rr.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the scheduling module RR (Round Robin) |
60,16 → 60,20 |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
//#define RRDEBUG |
#define rr_printf kern_printf |
/*+ Status used in the level +*/ |
#define RR_READY MODULE_STATUS_BASE |
#define RR_DELAY MODULE_STATUS_BASE+1 |
/*+ the level redefinition for the Round Robin level +*/ |
typedef struct { |
level_des l; /*+ the standard level descriptor +*/ |
QQUEUE ready; /*+ the ready queue +*/ |
IQUEUE ready; /*+ the ready queue +*/ |
int slice; /*+ the level's time slice +*/ |
77,112 → 81,57 |
the main task +*/ |
} RR_level_des; |
static char *RR_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case RR_READY: return "RR_Ready"; |
case RR_DELAY: return "RR_Delay"; |
default : return "RR_Unknown"; |
} |
} |
/*+ this function is called when a task finishes its delay +*/ |
static void RR_timer_delay(void *par) |
{ |
PID p = (PID) par; |
RR_level_des *lev; |
lev = (RR_level_des *)level_table[proc_table[p].task_level]; |
proc_table[p].status = RR_READY; |
qq_insertlast(p,&lev->ready); |
proc_table[p].delay_timer = NIL; /* Paranoia */ |
// kern_printf(" DELAY TIMER %d ", p); |
event_need_reschedule(); |
} |
static int RR_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l)) |
return 0; |
else |
return -1; |
} |
static int RR_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
return -1; |
} |
static void RR_level_status(LEVEL l) |
{ |
RR_level_des *lev = (RR_level_des *)(level_table[l]); |
PID p = qq_queryfirst(&lev->ready); |
kern_printf("Slice: %d \n", lev->slice); |
while (p != NIL) { |
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name, |
RR_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
} |
for (p=0; p<MAX_PROC; p++) |
if (proc_table[p].task_level == l && proc_table[p].status != RR_READY |
&& proc_table[p].status != FREE ) |
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name, |
RR_status_to_a(proc_table[p].status)); |
} |
/* This is not efficient, but it is very fair :-) |
All this is needed because if a task executes for a long time |
due to (shadow!) priority inheritance, it must go to the |
tail of the queue many times... */ |
static PID RR_level_scheduler(LEVEL l) |
static PID RR_public_scheduler(LEVEL l) |
{ |
RR_level_des *lev = (RR_level_des *)(level_table[l]); |
PID p; |
#ifdef RRDEBUG |
rr_printf("(RRs",p); |
#endif |
for (;;) { |
p = qq_queryfirst(&lev->ready); |
if (p == -1) |
p = iq_query_first(&lev->ready); |
if (p == -1) { |
#ifdef RRDEBUG |
rr_printf(" %d)",p); |
#endif |
return p; |
} |
if (proc_table[p].avail_time <= 0) { |
proc_table[p].avail_time += proc_table[p].wcet; |
qq_extract(p,&lev->ready); |
qq_insertlast(p,&lev->ready); |
iq_extract(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
else |
else { |
#ifdef RRDEBUG |
rr_printf(" %d)",p); |
#endif |
return p; |
} |
} |
} |
static int RR_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int RR_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
/* the RR level always guarantees... the function is defined because |
there can be an aperiodic server at a lower-priority level than |
the RR that needs a guarantee (e.g., a TBS server) */ |
return 1; |
} |
RR_level_des *lev = (RR_level_des *)(level_table[l]); |
NRT_TASK_MODEL *nrt; |
#ifdef RRDEBUG |
rr_printf("(create %d!!!!)",p); |
#endif |
static int RR_task_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
RR_level_des *lev = (RR_level_des *)(level_table[l]); |
NRT_TASK_MODEL *nrt = (NRT_TASK_MODEL *)m; |
if (m->pclass != NRT_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
nrt = (NRT_TASK_MODEL *)m; |
/* the task state is set to SLEEP by the general task_create; |
the only things left to set are the capacity fields, which take |
the values passed in the model... */ |
200,53 → 149,27 |
} |
proc_table[p].control |= CONTROL_CAP; |
#ifdef RRDEBUG |
rr_printf("(c%d av%d w%d )",p,proc_table[p].avail_time,proc_table[p].wcet); |
#endif |
return 0; /* OK */ |
} |
static void RR_task_detach(LEVEL l, PID p) |
static void RR_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the RR level doesn't introduce any new field in the TASK_MODEL, |
so all detach work is done by the task_create. |
The task state is set to FREE by the general task_create */ |
} |
static int RR_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void RR_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
RR_level_des *lev = (RR_level_des *)(level_table[l]); |
/* the task state is set to EXE by the scheduler(); |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
qq_extract(p, &lev->ready); |
iq_extract(p, &lev->ready); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds],&schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
// if (nostop) kern_printf("Û"); |
// kern_printf("(RR d %d)",nostop); |
#ifdef RRDEBUG |
rr_printf("(dis%d)",p); |
#endif |
} |
static void RR_task_epilogue(LEVEL l, PID p) |
static void RR_public_epilogue(LEVEL l, PID p) |
{ |
RR_level_des *lev = (RR_level_des *)(level_table[l]); |
254,16 → 177,20 |
qqueue position */ |
if (proc_table[p].avail_time <= 0) { |
proc_table[p].avail_time += proc_table[p].wcet; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
else |
/* curr is >0, so the running task has to run for another curr usec */ |
qq_insertfirst(p,&lev->ready); |
iq_insertfirst(p,&lev->ready); |
proc_table[p].status = RR_READY; |
#ifdef RRDEBUG |
rr_printf("(epi%d)",p); |
#endif |
} |
static void RR_task_activate(LEVEL l, PID p) |
static void RR_public_activate(LEVEL l, PID p) |
{ |
RR_level_des *lev = (RR_level_des *)(level_table[l]); |
272,26 → 199,33 |
if (proc_table[p].status != SLEEP) |
return; |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
/* Insert task in the correct position */ |
proc_table[p].status = RR_READY; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
#ifdef RRDEBUG |
rr_printf("(act%d)",p); |
#endif |
} |
static void RR_task_insert(LEVEL l, PID p) |
static void RR_public_unblock(LEVEL l, PID p) |
{ |
RR_level_des *lev = (RR_level_des *)(level_table[l]); |
/* Similar to RR_task_activate, but we don't check in what state |
the task is and we don't set the request_time */ |
/* Similar to RR_task_activate, |
but we don't check in what state the task is */ |
/* Insert task in the correct position */ |
proc_table[p].status = RR_READY; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
#ifdef RRDEBUG |
rr_printf("(ubl%d)",p); |
#endif |
} |
static void RR_task_extract(LEVEL l, PID p) |
static void RR_public_block(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extracted it from the ready queue at dispatch time. |
301,84 → 235,36 |
So, we do nothing!!! |
*/ |
#ifdef RRDEBUG |
rr_printf("(bl%d)",p); |
#endif |
} |
static void RR_task_endcycle(LEVEL l, PID p) |
static int RR_public_message(LEVEL l, PID p, void *m) |
{ |
// RR_level_des *lev = (RR_level_des *)(level_table[l]); |
proc_table[p].status = SLEEP; |
/* this function is equal to RR_task_extract, except that |
the task falls asleep... */ |
proc_table[p].status = SLEEP; |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
#ifdef RRDEBUG |
rr_printf("(msg%d)",p); |
#endif |
return 0; |
} |
static void RR_task_end(LEVEL l, PID p) |
static void RR_public_end(LEVEL l, PID p) |
{ |
// RR_level_des *lev = (RR_level_des *)(level_table[l]); |
/* we insert the task in the free queue */ |
proc_table[p].status = FREE; |
q_insert(p,&freedesc); |
} |
iq_insertlast(p,&freedesc); |
static void RR_task_sleep(LEVEL l, PID p) |
{ |
proc_table[p].status = SLEEP; |
#ifdef RRDEBUG |
rr_printf("(end%d)",p); |
#endif |
} |
static void RR_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
// RR_level_des *lev = (RR_level_des *)(level_table[l]); |
struct timespec wakeuptime; |
/* equal to RR_task_endcycle */ |
proc_table[p].status = RR_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT,&wakeuptime); |
ADDUSEC2TIMESPEC(usdelay,&wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
RR_timer_delay, |
(void *)p); |
} |
static int RR_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void RR_guest_detach(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR_guest_epilogue(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR_guest_activate(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR_guest_insert(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR_guest_extract(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR_guest_end(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
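The block of RR_guest_* stubs above disappears without replacement: in the reworked interface |
only modules that actually host guest jobs export the private_* entry points, so a pure |
task-level scheduler like RR simply provides none. |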
/* Registration functions */ |
/*+ This init function installs the "main" task +*/ |
404,9 → 290,13 |
p = task_create("Main", __init__, (TASK_MODEL *)&m, NULL); |
if (p == NIL) |
kern_printf("\nPanic!!! can't create main task... errno =%d\n",errno); |
printk(KERN_EMERG "Panic!!! can't create main task... errno =%d\n",errno); |
RR_task_activate(lev,p); |
RR_public_activate(lev,p); |
#ifdef RRDEBUG |
rr_printf("(main created %d)",p); |
#endif |
} |
414,7 → 304,7 |
TIME slice the slice for the Round Robin queue |
int createmain 1 if the level creates the main task 0 otherwise |
struct multiboot_info *mb used if createmain specified +*/ |
void RR_register_level(TIME slice, |
LEVEL RR_register_level(TIME slice, |
int createmain, |
struct multiboot_info *mb) |
{ |
424,54 → 314,25 |
printk("RR_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(RR_level_des)); |
/* alloc the space needed for the RR_level_des */ |
lev = (RR_level_des *)kern_alloc(sizeof(RR_level_des)); |
lev = (RR_level_des *)level_table[l]; |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, RR_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = RR_LEVEL_CODE; |
lev->l.level_version = RR_LEVEL_VERSION; |
lev->l.public_scheduler = RR_public_scheduler; |
lev->l.public_create = RR_public_create; |
lev->l.public_end = RR_public_end; |
lev->l.public_dispatch = RR_public_dispatch; |
lev->l.public_epilogue = RR_public_epilogue; |
lev->l.public_activate = RR_public_activate; |
lev->l.public_unblock = RR_public_unblock; |
lev->l.public_block = RR_public_block; |
lev->l.public_message = RR_public_message; |
lev->l.level_accept_task_model = RR_level_accept_task_model; |
lev->l.level_accept_guest_model = RR_level_accept_guest_model; |
lev->l.level_status = RR_level_status; |
lev->l.level_scheduler = RR_level_scheduler; |
lev->l.level_guarantee = RR_level_guarantee; |
lev->l.task_create = RR_task_create; |
lev->l.task_detach = RR_task_detach; |
lev->l.task_eligible = RR_task_eligible; |
lev->l.task_dispatch = RR_task_dispatch; |
lev->l.task_epilogue = RR_task_epilogue; |
lev->l.task_activate = RR_task_activate; |
lev->l.task_insert = RR_task_insert; |
lev->l.task_extract = RR_task_extract; |
lev->l.task_endcycle = RR_task_endcycle; |
lev->l.task_end = RR_task_end; |
lev->l.task_sleep = RR_task_sleep; |
lev->l.task_delay = RR_task_delay; |
lev->l.guest_create = RR_guest_create; |
lev->l.guest_detach = RR_guest_detach; |
lev->l.guest_dispatch = RR_guest_dispatch; |
lev->l.guest_epilogue = RR_guest_epilogue; |
lev->l.guest_activate = RR_guest_activate; |
lev->l.guest_insert = RR_guest_insert; |
lev->l.guest_extract = RR_guest_extract; |
lev->l.guest_endcycle = RR_guest_endcycle; |
lev->l.guest_end = RR_guest_end; |
lev->l.guest_sleep = RR_guest_sleep; |
lev->l.guest_delay = RR_guest_delay; |
/* fill the RR descriptor part */ |
qq_init(&lev->ready); |
iq_init(&lev->ready, &freedesc, 0); |
if (slice < RR_MINIMUM_SLICE) slice = RR_MINIMUM_SLICE; |
if (slice > RR_MAXIMUM_SLICE) slice = RR_MAXIMUM_SLICE; |
481,6 → 342,6 |
if (createmain) |
sys_atrunlevel(RR_call_main,(void *) l, RUNLEVEL_INIT); |
return l; |
} |
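RR_register_level() now returns the LEVEL it allocates, so init code can keep the handle; a |
minimal registration sketch under that assumption (the 2000-usec slice is an arbitrary example): |
/* sketch: registering RR plus the mandatory idle level (values illustrative) */ |
void register_my_levels(struct multiboot_info *mb) |
{ |
  LEVEL rr = RR_register_level(2000, 1, mb);  /* 2000 usec slice, create main */ |
  dummy_register_level();                     /* idle level, registered last  */ |
  (void)rr;  /* handle kept for modules that need a master level */ |
} |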
/shark/tags/rel_0_4/kernel/modules/sem.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: sem.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: sem.c,v 1.3 2003-01-07 17:07:51 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:51 $ |
------------ |
This file contains the Hartik 3.3.1 Semaphore functions |
79,7 → 79,7 |
char *name; /* a name, for named semaphores */ |
int index; /* an index for sem_open, containing the sem number */ |
int count; /* the semaphore counter */ |
QQUEUE blocked; /* the blocked processes queue */ |
IQUEUE blocked; /* the blocked processes queue */ |
int next; /* the semaphore queue */ |
BYTE used; /* 1 if the semaphore is used */ |
} sem_table[SEM_NSEMS_MAX]; |
91,7 → 91,7 |
int sem; /* the semaphore on which the process is blocked */ |
} sp_table[MAX_PROC]; |
static QUEUE free_sem; /* Queue of free sem */ |
static int free_sem; /* Queue of free sem */ |
112,10 → 112,10 |
task_testcancel */ |
/* extract the process from the semaphore queue... */ |
qq_extract(i,&sem_table[ sp_table[i].sem ].blocked); |
iq_extract(i,&sem_table[ sp_table[i].sem ].blocked); |
l = proc_table[i].task_level; |
level_table[l]->task_insert(l,i); |
level_table[l]->public_unblock(l,i); |
return 1; |
} |
134,7 → 134,7 |
sem_table[i].name = NULL; |
sem_table[i].index = i; |
sem_table[i].count = 0; |
qq_init(&sem_table[i].blocked); |
iq_init(&sem_table[i].blocked, &freedesc, 0); |
sem_table[i].next = i+1; |
sem_table[i].used = 0; |
} |
160,7 → 160,7 |
free_sem = sem_table[*sem].next; |
sem_table[*sem].name = NULL; |
sem_table[*sem].count = value; |
qq_init(&sem_table[*sem].blocked); |
iq_init(&sem_table[*sem].blocked, &freedesc, 0); |
sem_table[*sem].used = 1; |
} |
else { |
254,7 → 254,7 |
sem_table[sem].name = kern_alloc(strlen((char *)name)+1); |
strcpy(sem_table[sem].name, (char *)name); |
sem_table[sem].count = j; |
qq_init(&sem_table[sem].blocked); |
iq_init(&sem_table[sem].blocked, &freedesc, 0); |
sem_table[sem].used = 1; |
kern_sti(); |
return &sem_table[sem].index; |
350,25 → 350,14 |
if (s1->blocked.first != NIL || s1->count == 0) { |
/* We must block exec task */ |
LEVEL l; /* for readability only */ |
TIME tx; /* a dummy TIME for timespec operations */ |
struct timespec ty; /* a dummy timespec for timespec operations */ |
/* tracer stuff */ |
trc_logevent(TRC_SEM_WAIT,s); |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
/* we insert the task in the semaphore queue */ |
proc_table[exec_shadow].status = WAIT_SEM; |
378,7 → 367,7 |
sp_table[exec_shadow].sem = *s; |
/* ...and put it in sem queue */ |
qq_insertlast(exec_shadow,&s1->blocked); |
iq_insertlast(exec_shadow,&s1->blocked); |
/* and finally we reschedule */ |
exec = exec_shadow = -1; |
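The capacity bookkeeping deleted above (and again in sem_xwait() below) is folded into |
kern_epilogue_macro(); a sketch of its assumed effect, reconstructed from the removed lines: |
/* sketch: assumed effect of kern_epilogue_macro(), mirroring the old code */ |
static __inline__ void kern_epilogue_sketch(void) |
{ |
  TIME tx; |
  struct timespec ty; |
  kern_gettime(&schedule_time);             /* was ll_gettime(TIME_EXACT, ...) */ |
  SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
  tx = TIMESPEC2USEC(&ty); |
  proc_table[exec_shadow].avail_time -= tx; /* charge the elapsed slice */ |
  jet_update_slice(tx); |
  if (cap_timer != NIL) {                   /* cancel the capacity event */ |
    event_delete(cap_timer); |
    cap_timer = NIL; |
  } |
} |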
476,25 → 465,14 |
if (s1->blocked.first != NIL || s1->count < n) { |
/* We must block exec task */ |
LEVEL l; /* for readability only */ |
TIME tx; /* a dummy TIME for timespec operations */ |
struct timespec ty; /* a dummy timespec for timespec operations */ |
/* tracer */ |
trc_logevent(TRC_SEM_WAIT,s); |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
/* we insert the task in the semaphore queue */ |
proc_table[exec_shadow].status = WAIT_SEM; |
504,7 → 482,7 |
sp_table[exec_shadow].sem = *s; |
/* ...and put it in sem queue */ |
qq_insertlast(exec_shadow,&s1->blocked); |
iq_insertlast(exec_shadow,&s1->blocked); |
/* and finally we reschedule */ |
exec = exec_shadow = -1; |
554,10 → 532,10 |
s1->count -= sp_table[p].decsem; |
/* Get task from blocked queue */ |
qq_extract(p,&s1->blocked); |
iq_extract(p,&s1->blocked); |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
level_table[l]->public_unblock(l,p); |
/* only one task can be awakened */ |
/* Preempt if necessary */ |
event_need_reschedule(); |
579,10 → 557,10 |
s1->count -= sp_table[p].decsem; |
/* Get task from blocked queue */ |
qq_extract(p,&s1->blocked); |
iq_extract(p,&s1->blocked); |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
level_table[l]->public_unblock(l,p); |
/* only one task can be awakened */ |
/* Preempt if necessary */ |
scheduler(); |
627,10 → 605,10 |
s1->count -= sp_table[p].decsem; |
/* Get task from blocked queue */ |
qq_extract(p,&s1->blocked); |
iq_extract(p,&s1->blocked); |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
level_table[l]->public_unblock(l,p); |
/* Next task to wake */ |
p = s1->blocked.first; |
657,10 → 635,10 |
s1->count -= sp_table[p].decsem; |
/* Get task from blocked queue */ |
qq_extract(p,&s1->blocked); |
iq_extract(p,&s1->blocked); |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
level_table[l]->public_unblock(l,p); |
/* Next task to wake */ |
p = s1->blocked.first; |
695,16 → 673,16 |
kern_cli(); |
if (sem_table[*sem].blocked.first == NIL) |
if (iq_isempty(&sem_table[*sem].blocked)) |
/* the sem is free */ |
*sval = sem_table[*sem].count; |
else { |
/* the sem is busy */ |
*sval = 0; |
p = sem_table[*sem].blocked.first; |
p = iq_query_first(&sem_table[*sem].blocked); |
do { |
(*sval)--; |
p = proc_table[p].next; |
p = iq_query_next(p, &sem_table[*sem].blocked); |
} while (p != NIL); |
} |
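The loop above is also the reference idiom for walking an IQUEUE; the same pattern as a |
self-contained helper (hypothetical, for illustration only): |
/* sketch: counting the PIDs queued on an IQUEUE */ |
static int iq_count(IQUEUE *q) |
{ |
  int n = 0; |
  PID p = iq_query_first(q); |
  while (p != NIL) { |
    n++; |
    p = iq_query_next(p, q); |
  } |
  return n; |
} |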
/shark/tags/rel_0_4/kernel/modules/ss.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: ss.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: ss.c,v 1.4 2003-01-07 17:07:51 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:51 $ |
------------ |
This file contains the aperiodic Sporadic Server (SS). |
125,6 → 125,7 |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
/* For debugging purpose */ |
//#define DEBUG 1 |
155,7 → 156,7 |
bandwidth_t U; /*+ the used bandwidth by the server +*/ |
QQUEUE wait; /*+ the wait queue of the SS +*/ |
IQUEUE wait; /*+ the wait queue of the SS +*/ |
PID activated; /*+ the task inserted in another queue +*/ |
int flags; /*+ the init flags... +*/ |
174,7 → 175,7 |
} SS_level_des; |
/*+ function prototypes +*/ |
void SS_level_status(LEVEL l); |
void SS_internal_status(LEVEL l); |
static void SS_replenish_timer(void *arg); |
/*-------------------------------------------------------------------*/ |
313,8 → 314,8 |
if(ssq_inslast(l, lev->replenish_amount) == NIL) { |
kern_printf("SS: no more space to post replenishment\n"); |
kern_printf("You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n"); |
SS_level_status(l); |
kern_raise(XUNVALID_SS_REPLENISH,exec_shadow); |
SS_internal_status(l); |
kern_raise(XINVALID_SS_REPLENISH,exec_shadow); |
#ifdef DEBUG |
sys_abort(-1); |
exit(-1); |
324,8 → 325,8 |
} |
else { |
kern_printf("SS not active when posting R.A.\n"); |
SS_level_status(l); |
kern_raise(XUNVALID_SS_REPLENISH,exec_shadow); |
SS_internal_status(l); |
kern_raise(XINVALID_SS_REPLENISH,exec_shadow); |
#ifdef DEBUG |
sys_abort(-1); |
exit(-1); |
368,8 → 369,7 |
job_task_default_model(j,lev->lastdline); |
job_task_def_period(j,lev->period); |
level_table[m]->guest_create(m,p,(TASK_MODEL *)&j); |
level_table[m]->guest_activate(m,p); |
level_table[m]->private_insert(m,p,(TASK_MODEL *)&j); |
#ifdef DEBUG |
kern_printf("PID:%p lastdl:%d.%d ",p,lev->lastdline.tv_sec,lev->lastdline.tv_nsec); |
400,8 → 400,8 |
if(ssq_inslast(l, tx+lev->replenish_amount) == NIL) { |
kern_printf("SS: no more space to post replenishment\n"); |
kern_printf(" You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n"); |
SS_level_status(l); |
kern_raise(XUNVALID_SS_REPLENISH,exec_shadow); |
SS_internal_status(l); |
kern_raise(XINVALID_SS_REPLENISH,exec_shadow); |
#ifdef DEBUG |
sys_abort(-1); |
exit(-1); |
456,8 → 456,8 |
else { |
/* replenish queue is empty */ |
kern_printf("Replenish Timer fires but no Replenish Amount defined\n"); |
SS_level_status(l); |
kern_raise(XUNVALID_SS_REPLENISH,exec_shadow); |
SS_internal_status(l); |
kern_raise(XINVALID_SS_REPLENISH,exec_shadow); |
#ifdef DEBUG |
sys_abort(-1); |
exit(-1); |
465,13 → 465,13 |
} |
if (lev->availCs > 0 && lev->activated == NIL) { |
if (qq_queryfirst(&lev->wait) != NIL) { |
lev->activated = qq_getfirst(&lev->wait); |
if (iq_query_first(&lev->wait) != NIL) { |
lev->activated = iq_getfirst(&lev->wait); |
/* if server is active, replenish time already set */ |
if (lev->server_active == SS_SERVER_NOTACTIVE) { |
lev->server_active = SS_SERVER_ACTIVE; |
/* set replenish time */ |
ll_gettime(TIME_EXACT, &ty); |
kern_gettime(&ty); |
ADDUSEC2TIMESPEC(lev->period, &ty); |
TIMESPEC_ASSIGN(&lev->lastdline, &ty); |
#ifdef DEBUG |
488,7 → 488,7 |
static char *SS_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
return "Unavailable"; //status_to_a(status); |
switch (status) { |
case SS_WAIT : return "SS_Wait"; |
501,42 → 501,10 |
/*** Level functions ***/ |
static int SS_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
void SS_internal_status(LEVEL l) |
{ |
#ifdef DEBUG |
kern_printf("SS_levacctm cl=%d ",m->pclass); |
#endif |
if (m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l) ) { |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; |
if (s->periodicity == APERIODIC) { |
#ifdef DEBUG |
kern_printf("AcceptApe "); |
#endif |
return 0; |
} |
#ifdef DEBUG |
kern_printf("NAcceptApe "); |
#endif |
} |
#ifdef DEBUG |
kern_printf("NAccept "); |
#endif |
return -1; |
} |
static int SS_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
/* SS doesn't handles guest tasks */ |
return -1; |
} |
void SS_level_status(LEVEL l) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
PID p = qq_queryfirst(&lev->wait); |
PID p = iq_query_first(&lev->wait); |
kern_printf("On-line guarantee : %s\n", |
(lev->flags & SS_ENABLE_GUARANTEE_EDF || |
554,8 → 522,8 |
kern_printf("Activated: Pid: %d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n", |
lev->activated, |
proc_table[lev->activated].name, |
proc_table[lev->activated].timespec_priority.tv_sec, |
proc_table[lev->activated].timespec_priority.tv_nsec, |
iq_query_timespec(lev->activated,&lev->wait)->tv_sec, |
iq_query_timespec(lev->activated,&lev->wait)->tv_nsec, |
lev->nact[lev->activated], |
SS_status_to_a(proc_table[lev->activated].status)); |
564,23 → 532,12 |
p, |
proc_table[p].name, |
SS_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
p = iq_query_next(p, &lev->wait); |
} |
} |
static PID SS_level_scheduler(LEVEL l) |
static PID SS_public_schedulerbackground(LEVEL l) |
{ |
#ifdef DEBUG |
kern_printf("SS_levsch "); |
#endif |
/* the SS doesn't schedule anything... |
it's an RM level or similar that does it! */ |
return NIL; |
} |
static PID SS_level_schedulerbackground(LEVEL l) |
{ |
/* the SS catches background time to execute aperiodic activities */ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
593,11 → 550,11 |
if (lev->flags & SS_BACKGROUND_BLOCK) |
return NIL; |
else |
return qq_queryfirst(&lev->wait); |
return iq_query_first(&lev->wait); |
} |
/* The on-line guarantee is enabled only if the appropriate flag is set... */ |
static int SS_level_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth) |
static int SS_public_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
613,7 → 570,7 |
return 0; |
} |
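The diff elides the unchanged body of the two guarantee functions; by analogy with the other |
S.Ha.R.K. servers it is assumed to be the usual bandwidth check: |
/* sketch: assumed guarantee body, checking the server bandwidth lev->U */ |
if (*freebandwidth >= lev->U) { |
  *freebandwidth -= lev->U;  /* reserve the server's share */ |
  return 1;                  /* still schedulable          */ |
} |
else |
  return 0;                  /* no spare bandwidth left    */ |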
static int SS_level_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth) |
static int SS_public_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
634,17 → 591,22 |
/*** Task functions ***/ |
static int SS_task_create(LEVEL l, PID p, TASK_MODEL *m) |
static int SS_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; /* if the SS_task_create is |
called, the pclass must |
be a valid pclass. */ |
SOFT_TASK_MODEL *s; |
#ifdef DEBUG |
kern_printf("SS_taskcre "); |
#endif |
if (m->pclass != SOFT_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
s = (SOFT_TASK_MODEL *)m; |
if (s->periodicity != APERIODIC) return -1; |
s = (SOFT_TASK_MODEL *)m; |
if (s->arrivals == SAVE_ARRIVALS) |
lev->nact[p] = 0; |
else |
653,19 → 615,8 |
return 0; /* OK, also if the task cannot be guaranteed */ |
} |
static void SS_task_detach(LEVEL l, PID p) |
static void SS_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* No cleanups to do here. |
the SS level doesn't introduce any dynamically allocated field. */ |
} |
static int SS_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* If the task p is chosen, it is always eligible */ |
} |
static void SS_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
struct timespec ty; |
696,7 → 647,7 |
to exe before calling task_dispatch. |
We have to check lev->activated != p instead */ |
if (lev->activated != p) { |
qq_extract(p, &lev->wait); |
iq_extract(p, &lev->wait); |
#ifdef DEBUG |
kern_printf("extr task:%d ",p); |
#endif |
706,7 → 657,7 |
if (nostop) kern_printf("(gd status=%d)",proc_table[p].status); |
#endif |
level_table[lev->scheduling_level]-> |
guest_dispatch(lev->scheduling_level,p,nostop); |
private_dispatch(lev->scheduling_level,p,nostop); |
} |
/* set capacity timer */ |
723,7 → 674,7 |
} |
} |
static void SS_task_epilogue(LEVEL l, PID p) { |
static void SS_public_epilogue(LEVEL l, PID p) { |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
struct timespec ty; |
765,8 → 716,8 |
if(ssq_inslast(l, lev->replenish_amount) == NIL) { |
kern_printf("SS: no more space to post replenishment\n"); |
kern_printf("You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n"); |
SS_level_status(l); |
kern_raise(XUNVALID_SS_REPLENISH,exec_shadow); |
SS_internal_status(l); |
kern_raise(XINVALID_SS_REPLENISH,exec_shadow); |
#ifdef DEBUG |
sys_abort(-1); |
exit(-1); |
777,9 → 728,9 |
} |
if (lev->activated == p) |
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p); |
level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p); |
qq_insertfirst(p, &lev->wait); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = SS_WAIT; |
lev->activated = NIL; |
} |
786,20 → 737,20 |
else { |
/* The task has been preempted. |
It returns into the ready queue or to the |
wait queue by calling the guest_epilogue... */ |
wait queue by calling the private_epilogue... */ |
if (lev->activated == p) { /* goes into ready queue */ |
level_table[ lev->scheduling_level ]-> |
guest_epilogue(lev->scheduling_level,p); |
private_epilogue(lev->scheduling_level,p); |
} |
else { /* goes into wait queue */ |
qq_insertfirst(p, &lev->wait); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = SS_WAIT; |
} |
} |
} |
static void SS_task_activate(LEVEL l, PID p) |
static void SS_public_activate(LEVEL l, PID p) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
struct timespec ty; |
812,8 → 763,6 |
if (lev->nact[p] != -1) lev->nact[p]++; |
} |
else if (proc_table[p].status == SLEEP) { |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
// kern_printf("-%d.%d- ",proc_table[p].request_time.tv_sec,proc_table[p].request_time.tv_nsec); |
if (lev->activated == NIL && lev->availCs > 0) { |
if(!BACKGROUND_ON) { |
/* if server is active, replenish time already set */ |
820,7 → 769,7 |
if (lev->server_active == SS_SERVER_NOTACTIVE) { |
lev->server_active = SS_SERVER_ACTIVE; |
/* set replenish time */ |
TIMESPEC_ASSIGN(&ty, &proc_table[p].request_time); |
kern_gettime(&ty); |
ADDUSEC2TIMESPEC(lev->period, &ty); |
TIMESPEC_ASSIGN(&lev->lastdline, &ty); |
#ifdef DEBUG |
833,7 → 782,7 |
SS_activation(lev); |
} |
else { |
qq_insertlast(p, &lev->wait); |
iq_insertlast(p, &lev->wait); |
proc_table[p].status = SS_WAIT; |
} |
} |
847,7 → 796,7 |
} |
} |
static void SS_task_insert(LEVEL l, PID p) |
static void SS_public_unblock(LEVEL l, PID p) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
860,11 → 809,11 |
/* when we reinsert the task into the system, the server capacity |
is always 0, because nobody has executed through the SS in the meantime... */ |
qq_insertfirst(p, &lev->wait); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = SS_WAIT; |
} |
static void SS_task_extract(LEVEL l, PID p) |
static void SS_public_block(LEVEL l, PID p) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
883,10 → 832,10 |
lev->flags |= SS_BACKGROUND_BLOCK; |
if (lev->activated == p) |
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p); |
level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p); |
} |
static void SS_task_endcycle(LEVEL l, PID p) |
static int SS_public_message(LEVEL l, PID p, void *m) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
struct timespec ty; |
910,13 → 859,13 |
} |
if (lev->activated == p) |
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p); |
level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p); |
else |
qq_extract(p, &lev->wait); |
iq_extract(p, &lev->wait); |
if (lev->nact[p] > 0) { |
lev->nact[p]--; |
qq_insertlast(p, &lev->wait); |
iq_insertlast(p, &lev->wait); |
proc_table[p].status = SS_WAIT; |
} |
else { |
923,7 → 872,7 |
proc_table[p].status = SLEEP; |
} |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated != NIL) { |
SS_activation(lev); |
} |
933,9 → 882,14 |
SS_set_ra(l); |
} |
} |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
return 0; |
} |
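As in the other modules of this release, the old task_endcycle() entry survives as |
public_message(): the payload m is ignored and any message is apparently treated as an |
end-of-cycle notification, with the module itself now calling jet_update_endcycle() and |
logging TRC_ENDCYCLE. |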
static void SS_task_end(LEVEL l, PID p) |
static void SS_public_end(LEVEL l, PID p) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
struct timespec ty; |
959,12 → 913,12 |
} |
if (lev->activated == p) |
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p); |
level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p); |
proc_table[p].status = FREE; |
q_insertfirst(p,&freedesc); |
iq_insertfirst(p,&freedesc); |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated != NIL) { |
SS_activation(lev); |
} |
976,134 → 930,14 |
} |
} |
static void SS_task_sleep(LEVEL l, PID p) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
struct timespec ty; |
int tx; |
#ifdef DEBUG |
kern_printf("SS_tasksle "); |
#endif |
/* update the server capacity */ |
if (BACKGROUND_ON) |
lev->flags &= ~SS_BACKGROUND; |
else { |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
lev->availCs -= tx; |
lev->replenish_amount += tx; |
#ifdef DEBUG |
kern_printf("PID:%d RA=%d ",p,lev->replenish_amount); |
#endif |
} |
lev->nact[p] = 0; |
if (lev->activated == p) |
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p); |
else |
qq_extract(p, &lev->wait); |
proc_table[p].status = SLEEP; |
lev->activated = qq_getfirst(&lev->wait); |
if (lev->activated != NIL) { |
SS_activation(lev); |
} |
else { |
if(!(BACKGROUND_ON)){ |
/* No more tasks to schedule; set the replenish amount */ |
SS_set_ra(l); |
} |
} |
} |
static void SS_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
struct timespec ty; |
int tx; |
#ifdef DEBUG |
kern_printf("SS_tdelay "); |
#endif |
/* update the server capacity */ |
if (BACKGROUND_ON) |
lev->flags &= ~SS_BACKGROUND; |
else { |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
lev->availCs -= tx; |
lev->replenish_amount += tx; |
#ifdef DEBUG |
kern_printf("PID:%d RA=%d ",p,lev->replenish_amount); |
#endif |
/* Set the replenish amount here, because the delay may be long |
and the replenish time could arrive in the meantime */ |
SS_set_ra(l); |
} |
/* I hope no delay when owning a mutex... */ |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_delay(lev->scheduling_level,p,usdelay); |
} |
/*-------------------------------------------------------------------*/ |
/*** Guest functions ***/ |
/* SS doesn't handles guest tasks */ |
static int SS_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void SS_guest_detach(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_epilogue(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_activate(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_insert(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_extract(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_end(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
/*-------------------------------------------------------------------*/ |
/*** Registration functions ***/ |
/*+ Registration function: |
int flags the init flags ... see SS.h +*/ |
void SS_register_level(int flags, LEVEL master, int Cs, int per) |
LEVEL SS_register_level(int flags, LEVEL master, int Cs, int per) |
{ |
LEVEL l; /* the level that we register */ |
SS_level_des *lev; /* for readability only */ |
1110,63 → 944,33 |
PID i; /* a counter */ |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
#ifdef DEBUG |
kern_printf("Alloc des %d ",l); |
#endif |
l = level_alloc_descriptor(sizeof(SS_level_des)); |
/* alloc the space needed for the SS_level_des */ |
lev = (SS_level_des *)kern_alloc(sizeof(SS_level_des)); |
lev = (SS_level_des *)level_table[l]; |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
printk(" lev=%d\n",(int)lev); |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, SS_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = SS_LEVEL_CODE; |
lev->l.level_version = SS_LEVEL_VERSION; |
lev->l.level_accept_task_model = SS_level_accept_task_model; |
lev->l.level_accept_guest_model = SS_level_accept_guest_model; |
lev->l.level_status = SS_level_status; |
if (flags & SS_ENABLE_BACKGROUND) |
lev->l.level_scheduler = SS_level_schedulerbackground; |
else |
lev->l.level_scheduler = SS_level_scheduler; |
lev->l.public_scheduler = SS_public_schedulerbackground; |
if (flags & SS_ENABLE_GUARANTEE_EDF) |
lev->l.level_guarantee = SS_level_guaranteeEDF; |
lev->l.public_guarantee = SS_public_guaranteeEDF; |
else if (flags & SS_ENABLE_GUARANTEE_RM) |
lev->l.level_guarantee = SS_level_guaranteeRM; |
lev->l.public_guarantee = SS_public_guaranteeRM; |
else |
lev->l.level_guarantee = NULL; |
lev->l.public_guarantee = NULL; |
lev->l.task_create = SS_task_create; |
lev->l.task_detach = SS_task_detach; |
lev->l.task_eligible = SS_task_eligible; |
lev->l.task_dispatch = SS_task_dispatch; |
lev->l.task_epilogue = SS_task_epilogue; |
lev->l.task_activate = SS_task_activate; |
lev->l.task_insert = SS_task_insert; |
lev->l.task_extract = SS_task_extract; |
lev->l.task_endcycle = SS_task_endcycle; |
lev->l.task_end = SS_task_end; |
lev->l.task_sleep = SS_task_sleep; |
lev->l.task_delay = SS_task_delay; |
lev->l.public_create = SS_public_create; |
lev->l.public_end = SS_public_end; |
lev->l.public_dispatch = SS_public_dispatch; |
lev->l.public_epilogue = SS_public_epilogue; |
lev->l.public_activate = SS_public_activate; |
lev->l.public_unblock = SS_public_unblock; |
lev->l.public_block = SS_public_block; |
lev->l.public_message = SS_public_message; |
lev->l.guest_create = SS_guest_create; |
lev->l.guest_detach = SS_guest_detach; |
lev->l.guest_dispatch = SS_guest_dispatch; |
lev->l.guest_epilogue = SS_guest_epilogue; |
lev->l.guest_activate = SS_guest_activate; |
lev->l.guest_insert = SS_guest_insert; |
lev->l.guest_extract = SS_guest_extract; |
lev->l.guest_endcycle = SS_guest_endcycle; |
lev->l.guest_end = SS_guest_end; |
lev->l.guest_sleep = SS_guest_sleep; |
lev->l.guest_delay = SS_guest_delay; |
/* fill the SS descriptor part */ |
for (i=0; i<MAX_PROC; i++) |
1177,7 → 981,7 |
lev->period = per; |
qq_init(&lev->wait); |
iq_init(&lev->wait, &freedesc, 0); |
lev->activated = NIL; |
lev->U = (MAX_BANDWIDTH / per) * Cs; |
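Since bandwidth_t expresses utilization as a fraction of MAX_BANDWIDTH, the assignment above |
computes U = (Cs/per) * MAX_BANDWIDTH in integer arithmetic; for example, Cs = 10000 usec of |
capacity every per = 100000 usec yields U = MAX_BANDWIDTH/10, i.e. a 10% server. The order |
(MAX_BANDWIDTH / per) * Cs avoids overflowing the intermediate product, at the cost of some |
truncation. |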
1195,23 → 999,19 |
lev->rcount=0; |
lev->replenish_amount=0; |
lev->server_active=SS_SERVER_NOTACTIVE; |
return l; |
} |
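SS_register_level() likewise returns its LEVEL now; a usage sketch, assuming an EDF master |
level registered first (EDF_register_level() and its flags argument are assumed from edf.h): |
/* sketch: an SS server that runs its jobs through an EDF master level */ |
LEVEL edf, ss; |
edf = EDF_register_level(0);                /* flags value assumed          */ |
ss  = SS_register_level(SS_ENABLE_BACKGROUND | SS_ENABLE_GUARANTEE_EDF, |
                        edf,                /* master scheduling level      */ |
                        10000,              /* Cs: server capacity (usec)   */ |
                        100000);            /* per: replenish period (usec) */ |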
bandwidth_t SS_usedbandwidth(LEVEL l) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
if (lev->l.level_code == SS_LEVEL_CODE && |
lev->l.level_version == SS_LEVEL_VERSION) |
return lev->U; |
else |
return 0; |
return lev->U; |
} |
int SS_availCs(LEVEL l) { |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
if (lev->l.level_code == SS_LEVEL_CODE && |
lev->l.level_version == SS_LEVEL_VERSION) |
return lev->availCs; |
else |
return 0; |
return lev->availCs; |
} |
/shark/tags/rel_0_4/kernel/modules/tbs.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: tbs.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: tbs.c,v 1.4 2003-01-07 17:07:51 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:51 $ |
------------ |
This file contains the aperiodic server TBS (Total Bandwidth Server) |
60,6 → 60,7 |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
/*+ 4 debug purposes +*/ |
#undef TBS_TEST |
84,7 → 85,7 |
struct timespec lastdline; /*+ the last deadline assigned to |
a TBS task +*/ |
QQUEUE wait; /*+ the wait queue of the TBS +*/ |
IQUEUE wait; /*+ the wait queue of the TBS +*/ |
PID activated; /*+ the task inserted in another queue +*/ |
int flags; /*+ the init flags... +*/ |
97,19 → 98,6 |
} TBS_level_des; |
static char *TBS_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case TBS_WCET_VIOLATED: return "TBS_Wcet_Violated"; |
case TBS_WAIT : return "TBS_Wait"; |
default : return "TBS_Unknown"; |
} |
} |
#ifdef TESTG |
#include "drivers/glib.h" |
#endif |
131,9 → 119,6 |
/* we compute a suitable deadline for the task */ |
drel = (proc_table[p].wcet * lev->band_den) / lev->band_num; |
if (TIMESPEC_A_GT_B(&proc_table[p].request_time, &lev->lastdline)) |
TIMESPEC_ASSIGN(&lev->lastdline, &proc_table[p].request_time ); |
ADDUSEC2TIMESPEC(drel, &lev->lastdline); |
#ifdef TESTG |
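For reference, this is the classic TBS deadline rule d_k = max(r_k, d_(k-1)) + C_k/U_s: with |
the server bandwidth U_s = band_num/band_den, the relative deadline is drel = wcet * band_den |
/ band_num, and the max() is the TIMESPEC_A_GT_B test (computed here in the old code, moved |
into TBS_public_activate() in the new one). For example, wcet = 10000 usec at U_s = 1/4 |
advances the deadline by 40000 usec per activation. |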
147,8 → 132,7 |
/* and we insert the task in another level */ |
m = lev->scheduling_level; |
job_task_default_model(j,lev->lastdline); |
level_table[m]->guest_create(m,p,(TASK_MODEL *)&j); |
level_table[m]->guest_activate(m,p); |
level_table[m]->private_insert(m,p,(TASK_MODEL *)&j); |
#ifdef TBS_TEST |
kern_printf("TBS_activation: lastdline %ds %dns\n",lev->lastdline.tv_sec,lev->lastdline.tv_nsec); |
176,74 → 160,8 |
#endif |
} |
static int TBS_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l) ) { |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; |
if (s->wcet && s->periodicity == APERIODIC) |
return 0; |
} |
return -1; |
} |
static int TBS_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
return -1; |
} |
static char *onoff(int i) |
{ |
if (i) |
return "On "; |
else |
return "Off"; |
} |
static void TBS_level_status(LEVEL l) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
PID p = qq_queryfirst(&lev->wait); |
kern_printf("Wcet Check : %s\n", |
onoff(lev->flags & TBS_ENABLE_WCET_CHECK)); |
kern_printf("On-line guarantee : %s\n", |
onoff(lev->flags & TBS_ENABLE_GUARANTEE)); |
kern_printf("Used Bandwidth : %u/%u\n", |
lev->U, MAX_BANDWIDTH); |
kern_printf("Last deadline : %lds %ldns\n",lev->lastdline.tv_sec, |
lev->lastdline.tv_nsec); |
if (lev->activated != -1) |
kern_printf("Activated: Pid: %2d Name: %10s Dl: %ld.%9ld nact: %d Stat: %s\n", |
lev->activated, |
proc_table[lev->activated].name, |
proc_table[lev->activated].timespec_priority.tv_sec, |
proc_table[lev->activated].timespec_priority.tv_nsec, |
lev->nact[lev->activated], |
TBS_status_to_a(proc_table[lev->activated].status)); |
while (p != NIL) { |
kern_printf("Pid: %2d Name: %10s Stat: %s\n", |
p, |
proc_table[p].name, |
TBS_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
} |
} |
static PID TBS_level_scheduler(LEVEL l) |
{ |
/* the TBS doesn't schedule anything... |
it's an EDF level or similar that does it! */ |
return NIL; |
} |
/* The on-line guarantee is enabled only if the appropriate flag is set... */ |
static int TBS_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int TBS_public_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
255,14 → 173,19 |
return 0; |
} |
static int TBS_task_create(LEVEL l, PID p, TASK_MODEL *m) |
static int TBS_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
/* if the TBS_task_create is called, then the pclass must be a |
valid pclass. */ |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; |
SOFT_TASK_MODEL *s; |
if (m->pclass != SOFT_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
s = (SOFT_TASK_MODEL *)m; |
if (!(s->wcet && s->periodicity == APERIODIC)) return -1; |
proc_table[p].wcet = s->wcet; |
/* Enable wcet check */ |
278,26 → 201,8 |
return 0; /* OK, also if the task cannot be guaranteed... */ |
} |
static void TBS_task_detach(LEVEL l, PID p) |
static void TBS_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the TBS level doesn't introduce any dynamically allocated new field. */ |
} |
static int TBS_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void TBS_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
/* there is at least one ready task inserted in an EDF or similar |
304,20 → 209,10 |
level */ |
level_table[ lev->scheduling_level ]-> |
guest_dispatch(lev->scheduling_level,p,nostop); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
private_dispatch(lev->scheduling_level,p,nostop); |
} |
static void TBS_task_epilogue(LEVEL l, PID p) |
static void TBS_public_epilogue(LEVEL l, PID p) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
331,7 → 226,7 |
have to be put in place... this code is identical to the |
TBS_task_end */ |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
/* we reclaim an avail time that can be <0 due to the timer |
approximations -> we have to postpone the deadline a little! |
345,7 → 240,7 |
lev->lastdline.tv_sec, lev->lastdline.tv_nsec); |
#endif |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated != NIL) |
TBS_activation(lev); |
} |
353,18 → 248,22 |
/* the task has been preempted. it returns into the ready queue by |
calling the guest_epilogue... */ |
level_table[ lev->scheduling_level ]-> |
guest_epilogue(lev->scheduling_level,p); |
private_epilogue(lev->scheduling_level,p); |
} |
static void TBS_task_activate(LEVEL l, PID p) |
static void TBS_public_activate(LEVEL l, PID p) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
struct timespec t; |
if (proc_table[p].status == SLEEP || |
proc_table[p].status == TBS_WCET_VIOLATED) { |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
kern_gettime(&t); |
if (TIMESPEC_A_GT_B(&t, &lev->lastdline)) |
TIMESPEC_ASSIGN(&lev->lastdline, &t ); |
if (lev->activated == NIL) { |
/* This is the first task in the level, so we activate it immediately */ |
lev->activated = p; |
372,7 → 271,7 |
} |
else { |
proc_table[p].status = TBS_WAIT; |
qq_insertlast(p, &lev->wait); |
iq_insertlast(p, &lev->wait); |
} |
} |
else if (lev->flag[p] & TBS_SAVE_ARRIVALS) |
381,23 → 280,25 |
kern_printf("TBSREJ!!!");*/ |
} |
static void TBS_task_insert(LEVEL l, PID p) |
static void TBS_public_unblock(LEVEL l, PID p) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
JOB_TASK_MODEL j; |
level_table[ lev->scheduling_level ]-> |
guest_insert(lev->scheduling_level,p); |
job_task_default_model(j,lev->lastdline); |
level_table[lev->scheduling_level]-> |
private_insert(lev->scheduling_level,p,(TASK_MODEL *)&j); |
} |
static void TBS_task_extract(LEVEL l, PID p) |
static void TBS_public_block(LEVEL l, PID p) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
level_table[ lev->scheduling_level ]-> |
guest_extract(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
} |
static void TBS_task_endcycle(LEVEL l, PID p) |
static int TBS_public_message(LEVEL l, PID p, void *m) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
405,7 → 306,7 |
that implements a single activation, so we have to call |
the guest_end, which represents a single activation... */ |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
TBS_bandwidth_reclaiming(lev,p); |
417,105 → 318,38 |
// lev->nact[p] can be >0 only if the SAVE_ARRIVALS bit is set |
lev->nact[p]--; |
proc_table[p].status = TBS_WAIT; |
qq_insertlast(p, &lev->wait); |
iq_insertlast(p, &lev->wait); |
} |
else |
proc_table[p].status = SLEEP; |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated != NIL) |
TBS_activation(lev); |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
return 0; |
} |
static void TBS_task_end(LEVEL l, PID p) |
static void TBS_public_end(LEVEL l, PID p) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
TBS_bandwidth_reclaiming(lev,p); |
proc_table[p].status = FREE; |
q_insertfirst(p,&freedesc); |
iq_insertfirst(p,&freedesc); |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated != NIL) |
TBS_activation(lev); |
} |
static void TBS_task_sleep(LEVEL l, PID p) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
/* a task activation is finished, but we are using a JOB_TASK_MODEL |
that implements a single activation, so we have to call |
the guest_end, which represents a single activation... */ |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
TBS_bandwidth_reclaiming(lev,p); |
/* we reset the capacity counters... */ |
if (lev->flags & TBS_ENABLE_WCET_CHECK) |
proc_table[p].avail_time = proc_table[p].wcet; |
proc_table[p].status = SLEEP; |
lev->nact[p] = 0; |
lev->activated = qq_getfirst(&lev->wait); |
if (lev->activated != NIL) |
TBS_activation(lev); |
} |
static void TBS_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
level_table[ lev->scheduling_level ]-> |
guest_delay(lev->scheduling_level,p,usdelay); |
} |
static int TBS_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void TBS_guest_detach(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_epilogue(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_activate(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_insert(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_extract(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_end(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
/* Registration functions */ |
/*+ Registration function: |
529,58 → 363,28 |
printk("TBS_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(TBS_level_des)); |
printk(" alloco descrittore %d %d\n",l,(int)sizeof(TBS_level_des)); |
lev = (TBS_level_des *)level_table[l]; |
/* alloc the space needed for the TBS_level_des */ |
lev = (TBS_level_des *)kern_alloc(sizeof(TBS_level_des)); |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, TBS_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = TBS_LEVEL_CODE; |
lev->l.level_version = TBS_LEVEL_VERSION; |
lev->l.level_accept_task_model = TBS_level_accept_task_model; |
lev->l.level_accept_guest_model = TBS_level_accept_guest_model; |
lev->l.level_status = TBS_level_status; |
lev->l.level_scheduler = TBS_level_scheduler; |
if (flags & TBS_ENABLE_GUARANTEE) |
lev->l.level_guarantee = TBS_level_guarantee; |
lev->l.public_guarantee = TBS_public_guarantee; |
else |
lev->l.level_guarantee = NULL; |
lev->l.public_guarantee = NULL; |
lev->l.task_create = TBS_task_create; |
lev->l.task_detach = TBS_task_detach; |
lev->l.task_eligible = TBS_task_eligible; |
lev->l.task_dispatch = TBS_task_dispatch; |
lev->l.task_epilogue = TBS_task_epilogue; |
lev->l.task_activate = TBS_task_activate; |
lev->l.task_insert = TBS_task_insert; |
lev->l.task_extract = TBS_task_extract; |
lev->l.task_endcycle = TBS_task_endcycle; |
lev->l.task_end = TBS_task_end; |
lev->l.task_sleep = TBS_task_sleep; |
lev->l.task_delay = TBS_task_delay; |
lev->l.public_guarantee = TBS_public_guarantee; |
lev->l.public_create = TBS_public_create; |
lev->l.public_end = TBS_public_end; |
lev->l.public_dispatch = TBS_public_dispatch; |
lev->l.public_epilogue = TBS_public_epilogue; |
lev->l.public_activate = TBS_public_activate; |
lev->l.public_unblock = TBS_public_unblock; |
lev->l.public_block = TBS_public_block; |
lev->l.public_message = TBS_public_message; |
lev->l.guest_create = TBS_guest_create; |
lev->l.guest_detach = TBS_guest_detach; |
lev->l.guest_dispatch = TBS_guest_dispatch; |
lev->l.guest_epilogue = TBS_guest_epilogue; |
lev->l.guest_activate = TBS_guest_activate; |
lev->l.guest_insert = TBS_guest_insert; |
lev->l.guest_extract = TBS_guest_extract; |
lev->l.guest_endcycle = TBS_guest_endcycle; |
lev->l.guest_end = TBS_guest_end; |
lev->l.guest_sleep = TBS_guest_sleep; |
lev->l.guest_delay = TBS_guest_delay; |
/* fill the TBS descriptor part */ |
for (i = 0; i < MAX_PROC; i++) { |
590,7 → 394,7 |
NULL_TIMESPEC(&lev->lastdline); |
qq_init(&lev->wait); |
iq_init(&lev->wait, &freedesc, 0); |
lev->activated = NIL; |
lev->U = (MAX_BANDWIDTH / den) * num; |
605,20 → 409,14 |
bandwidth_t TBS_usedbandwidth(LEVEL l) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
if (lev->l.level_code == TBS_LEVEL_CODE && |
lev->l.level_version == TBS_LEVEL_VERSION) |
return lev->U; |
else |
return 0; |
return lev->U; |
} |
int TBS_get_nact(LEVEL l, PID p) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
if (lev->l.level_code == TBS_LEVEL_CODE && |
lev->l.level_version == TBS_LEVEL_VERSION) |
return lev->nact[p]; |
else |
return -1; |
return lev->nact[p]; |
} |
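TBS_usedbandwidth() and TBS_get_nact(), like SS_usedbandwidth() and SS_availCs() above, lose |
their level_code/level_version sanity checks, which go away together with the code/version |
fields of the standard descriptor; callers are now trusted to pass a LEVEL obtained from the |
matching *_register_level(). |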
/shark/tags/rel_0_4/kernel/modules/dummy.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: dummy.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: dummy.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the Dummy scheduling module |
58,7 → 58,6 |
#include <ll/string.h> |
#include <kernel/config.h> |
#include <sys/types.h> |
#include <modules/codes.h> |
#include <kernel/model.h> |
#include <kernel/descr.h> |
#include <kernel/var.h> |
74,42 → 73,21 |
} dummy_level_des; |
static int dummy_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
static PID dummy_public_scheduler(LEVEL l) |
{ |
dummy_level_des *lev = (dummy_level_des *)(level_table[l]); |
if ((m->pclass == DUMMY_PCLASS || m->pclass == (DUMMY_PCLASS | l)) |
&& lev->dummy == -1) |
return 0; |
else |
return -1; |
//kern_printf("DUMMYsched!!! %d", lev->dummy); |
return lev->dummy; |
} |
static int dummy_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
static int dummy_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
return -1; |
} |
static void dummy_level_status(LEVEL l) |
{ |
dummy_level_des *lev = (dummy_level_des *)(level_table[l]); |
kern_printf("dummy PID: %d\n", lev->dummy); |
}; |
if (m->pclass != DUMMY_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
if (lev->dummy != -1) return -1; |
static PID dummy_level_scheduler(LEVEL l) |
{ |
dummy_level_des *lev = (dummy_level_des *)(level_table[l]); |
//kern_printf("DUMMYsched!!! %d", lev->dummy); |
return lev->dummy; |
} |
/* There is no guarantee on this level!!! -> the entry must be null: |
int (*level_guarantee)(LEVEL l, DWORD *freebandwidth); */ |
static int dummy_task_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
/* the dummy level doesn't introduce any new field in the TASK_MODEL, |
so all the initialization work is done by the generic task_create; |
the task state is set to SLEEP by the general task_create */ |
116,100 → 94,16 |
return 0; /* OK */ |
} |
static void dummy_task_detach(LEVEL l, PID p) |
static void dummy_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the dummy level doesn't introduce any new field in the TASK_MODEL, |
so all the detach work is done by the generic task_create; |
the task state is set to FREE by the general task_create */ |
} |
static int dummy_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
static void dummy_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* nothing... the dummy hangs the cpu waiting for interrupts... */ |
if (0)//testactive) |
{ |
s_stime[useds]= schedule_time; |
s_curr[useds] = -1; |
s_PID[useds] = p; |
useds++; |
} |
//kern_printf("ÛDUMMYÛ"); |
} |
static void dummy_task_epilogue(LEVEL l, PID p) |
static void dummy_public_epilogue(LEVEL l, PID p) |
{ |
proc_table[p].status = SLEEP; /* Paranoia */ |
} |
static void dummy_task_activate(LEVEL l, PID p) |
{ kern_printf("Dummy1"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); } |
static void dummy_task_insert(LEVEL l, PID p) |
{ kern_printf("Dummy2"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); } |
static void dummy_task_extract(LEVEL l, PID p) |
{ kern_printf("Dummy3"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); } |
static void dummy_task_endcycle(LEVEL l, PID p) |
{ kern_printf("Dummy4"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); } |
static void dummy_task_end(LEVEL l, PID p) |
{ kern_printf("Dummy5"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); } |
static void dummy_task_sleep(LEVEL l, PID p) |
{ kern_printf("Dummy6"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); } |
static void dummy_task_delay(LEVEL l, PID p, TIME tickdelay) |
{ kern_printf("Dummy7"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); } |
static int dummy_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_printf("Dummy8"); kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void dummy_guest_detach(LEVEL l, PID p) |
{ kern_printf("Dummy9"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_printf("Dummy0"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_epilogue(LEVEL l, PID p) |
{ kern_printf("Dummya"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_activate(LEVEL l, PID p) |
{ kern_printf("Dummyb"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_insert(LEVEL l, PID p) |
{ kern_printf("Dummyc"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_extract(LEVEL l, PID p) |
{ kern_printf("Dummyd"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_endcycle(LEVEL l, PID p) |
{ kern_printf("Dummye"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_end(LEVEL l, PID p) |
{ kern_printf("Dummyf"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_sleep(LEVEL l, PID p) |
{ kern_printf("Dummyg"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_printf("Dummyh"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
/*+ Dummy task must be present & cannot be killed; +*/ |
static TASK dummy() |
{ |
252,7 → 146,7 |
if (p == NIL) |
printk("\nPanic!!! can't create dummy task...\n"); |
/* dummy must block all tasks... */ |
/* dummy must block all signals... */ |
proc_table[p].sigmask = 0xFFFFFFFF; |
} |
261,57 → 155,27 |
TIME slice the slice for the Round Robin queue |
int createmain 1 if the level creates the main task 0 otherwise |
struct multiboot_info *mb used if createmain specified +*/ |
void dummy_register_level() |
LEVEL dummy_register_level() |
{ |
LEVEL l; /* the level that we register */ |
dummy_level_des *lev; /* for readability only */ |
printk("Entro in dummy_register_level\n"); |
printk("Inside dummy_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(dummy_level_des)); |
/* alloc the space needed for the dummy_level_des */ |
lev = (dummy_level_des *)kern_alloc(sizeof(dummy_level_des)); |
lev = (dummy_level_des *)level_table[l]; |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
printk(" lev=%d\n",(int)lev); |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, DUMMY_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = DUMMY_LEVEL_CODE; |
lev->l.level_version = DUMMY_LEVEL_VERSION; |
lev->l.public_scheduler = dummy_public_scheduler; |
lev->l.public_guarantee = NULL; |
lev->l.public_create = dummy_public_create; |
lev->l.public_dispatch = dummy_public_dispatch; |
lev->l.public_epilogue = dummy_public_epilogue; |
lev->l.level_accept_task_model = dummy_level_accept_task_model; |
lev->l.level_accept_guest_model = dummy_level_accept_guest_model; |
lev->l.level_status = dummy_level_status; |
lev->l.level_scheduler = dummy_level_scheduler; |
lev->l.level_guarantee = NULL; /* No guarantee! */ |
lev->l.task_create = dummy_task_create; |
lev->l.task_detach = dummy_task_detach; |
lev->l.task_eligible = dummy_task_eligible; |
lev->l.task_dispatch = dummy_task_dispatch; |
lev->l.task_epilogue = dummy_task_epilogue; |
lev->l.task_activate = dummy_task_activate; |
lev->l.task_insert = dummy_task_insert; |
lev->l.task_extract = dummy_task_extract; |
lev->l.task_endcycle = dummy_task_endcycle; |
lev->l.task_end = dummy_task_end; |
lev->l.task_sleep = dummy_task_sleep; |
lev->l.task_delay = dummy_task_delay; |
lev->l.guest_create = dummy_guest_create; |
lev->l.guest_detach = dummy_guest_detach; |
lev->l.guest_dispatch = dummy_guest_dispatch; |
lev->l.guest_epilogue = dummy_guest_epilogue; |
lev->l.guest_activate = dummy_guest_activate; |
lev->l.guest_insert = dummy_guest_insert; |
lev->l.guest_extract = dummy_guest_extract; |
lev->l.guest_endcycle = dummy_guest_endcycle; |
lev->l.guest_end = dummy_guest_end; |
lev->l.guest_sleep = dummy_guest_sleep; |
lev->l.guest_delay = dummy_guest_delay; |
/* the dummy process will be created at init time; |
see also dummy_level_accept_model, dummy_create */ |
lev->dummy = -1; |
319,4 → 183,6 |
printk("\tPosto dummy_create\n"); |
sys_atrunlevel(dummy_create,(void *) l, RUNLEVEL_INIT); |
return l; |
} |
/shark/tags/rel_0_4/kernel/modules/nop.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: nop.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: nop.c,v 1.3 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
Binary Semaphores. see nop.h for more details... |
58,7 → 58,6 |
#include <ll/string.h> |
#include <kernel/const.h> |
#include <sys/types.h> |
#include <modules/codes.h> |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
73,7 → 72,7 |
mutex_t structure */ |
typedef struct { |
PID owner; |
QQUEUE blocked; |
IQUEUE blocked; |
} NOP_mutex_t; |
80,40 → 79,21 |
/* Wait status for this library */ |
#define NOP_WAIT LIB_STATUS_BASE |
/*+ print resource protocol statistics...+*/ |
static void NOP_resource_status(RLEVEL r) |
static int NOP_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
kern_printf("No status for NOP module\n"); |
} |
static int NOP_level_accept_resource_model(RLEVEL l, RES_MODEL *r) |
{ |
/* NOP works with all tasks without Resource parameters */ |
return -1; |
} |
static void NOP_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
/* never called!!! */ |
} |
static void NOP_res_detach(RLEVEL l, PID p) |
{ |
} |
static int NOP_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a) |
{ |
if (a->mclass == NOP_MCLASS || a->mclass == (NOP_MCLASS | l) ) |
return 0; |
else |
return -1; |
} |
static int NOP_init(RLEVEL l, mutex_t *m, const mutexattr_t *a) |
{ |
NOP_mutex_t *p; |
if (a->mclass != NOP_MCLASS) |
return -1; |
p = (NOP_mutex_t *) kern_alloc(sizeof(NOP_mutex_t)); |
124,7 → 104,7 |
return (ENOMEM); |
p->owner = NIL; |
qq_init(&p->blocked); |
iq_init(&p->blocked, &freedesc, 0); |
m->mutexlevel = l; |
m->opt = (void *)p; |
172,27 → 152,16 |
if (p->owner != NIL) { /* We must block exec task */ |
LEVEL l; /* for readability only */ |
TIME tx; /* a dummy TIME for timespec operations */ |
struct timespec ty; /* a dummy timespec for timespec operations */ |
proc_table[exec_shadow].context = kern_context_save(); |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
/* we insert the task in the semaphore queue */ |
proc_table[exec_shadow].status = NOP_WAIT; |
qq_insertlast(exec_shadow,&p->blocked); |
iq_insertlast(exec_shadow,&p->blocked); |
/* and finally we reschedule */ |
exec = exec_shadow = -1; |
253,10 → 222,10 |
proc_table[exec_shadow].context = kern_context_save(); |
/* the mutex is mine; pop the first task to extract */ |
p->owner = qq_getfirst(&p->blocked); |
p->owner = iq_getfirst(&p->blocked); |
if (p->owner != NIL) { |
l = proc_table[p->owner].task_level; |
level_table[l]->task_insert(l,p->owner); |
level_table[l]->public_unblock(l,p->owner); |
} |
scheduler(); |
265,7 → 234,7 |
return 0; |
} |
void NOP_register_module(void) |
RLEVEL NOP_register_module(void) |
{ |
RLEVEL l; /* the level that we register */ |
NOP_mutex_resource_des *m; /* for readability only */ |
282,20 → 251,11 |
resource_table[l] = (resource_des *)m; |
/* fill the resource_des descriptor */ |
strncpy(m->m.r.res_name, NOP_MODULENAME, MAX_MODULENAME); |
m->m.r.res_code = NOP_MODULE_CODE; |
m->m.r.res_version = NOP_MODULE_VERSION; |
m->m.r.rtype = MUTEX_RTYPE; |
m->m.r.resource_status = NOP_resource_status; |
m->m.r.level_accept_resource_model = NOP_level_accept_resource_model; |
m->m.r.res_register = NOP_res_register; |
m->m.r.res_detach = NOP_res_detach; |
/* fill the mutex_resource_des descriptor */ |
m->m.level_accept_mutexattr = NOP_level_accept_mutexattr; |
m->m.init = NOP_init; |
m->m.destroy = NOP_destroy; |
m->m.lock = NOP_lock; |
302,5 → 262,6 |
m->m.trylock = NOP_trylock; |
m->m.unlock = NOP_unlock; |
return l; |
} |
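NOP_lock and NOP_unlock above reduce to a classic blocking binary semaphore: a mutex is an owner PID plus a FIFO of blocked tasks (now an IQUEUE), lock blocks the caller when the mutex is owned, and unlock hands ownership to the first waiter and unblocks it. A toy sketch of that discipline, with a plain array standing in for the kernel's IQUEUE and scheduler hooks:

#include <stdio.h>

#define NIL      -1
#define MAX_PROC 16

typedef struct {
    int owner;             /* task holding the mutex, or NIL   */
    int blocked[MAX_PROC]; /* FIFO of blocked task ids         */
    int head, tail;        /* ring indices into blocked[]      */
} toy_mutex_t;

static void toy_lock(toy_mutex_t *m, int task)
{
    if (m->owner == NIL) {
        m->owner = task;                         /* free: just take it */
    } else {
        /* busy: queue the caller; the real module also sets the task
           status to NOP_WAIT, calls public_block() and reschedules */
        m->blocked[m->tail++ % MAX_PROC] = task;
    }
}

static void toy_unlock(toy_mutex_t *m)
{
    if (m->head != m->tail) {
        /* hand ownership to the first waiter; the real module calls
           public_unblock() on it and then scheduler() */
        m->owner = m->blocked[m->head++ % MAX_PROC];
    } else {
        m->owner = NIL;
    }
}

int main(void)
{
    toy_mutex_t m = { NIL, {0}, 0, 0 };
    toy_lock(&m, 1);                 /* task 1 owns the mutex      */
    toy_lock(&m, 2);                 /* task 2 blocks behind it    */
    toy_unlock(&m);                  /* ownership passes to task 2 */
    printf("owner = %d\n", m.owner); /* prints: owner = 2          */
    return 0;
}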
/shark/tags/rel_0_4/kernel/modules/npp.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: npp.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: npp.c,v 1.2 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
Non Preemptive Protocol. see npp.h for more details... |
56,7 → 56,6 |
#include <ll/ll.h> |
#include <ll/string.h> |
#include <ll/stdio.h> |
#include <modules/codes.h> |
#include <kernel/const.h> |
#include <sys/types.h> |
#include <kernel/descr.h> |
71,6 → 70,7 |
} NPP_mutex_resource_des; |
#if 0 |
/*+ print resource protocol statistics...+*/ |
static void NPP_resource_status(RLEVEL r) |
{ |
78,18 → 78,14 |
kern_printf("%d Resources owned by the tasks %d\n", m->nlocked, exec_shadow); |
} |
#endif |
static int NPP_level_accept_resource_model(RLEVEL l, RES_MODEL *r) |
static int NPP_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
/* NPP works with all tasks without Resource parameters */ |
return -1; |
} |
static void NPP_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
/* never called!!! */ |
} |
static void NPP_res_detach(RLEVEL l, PID p) |
{ |
NPP_mutex_resource_des *m = (NPP_mutex_resource_des *)(resource_table[l]); |
98,16 → 94,11 |
kern_raise(XMUTEX_OWNER_KILLED, p); |
} |
static int NPP_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a) |
static int NPP_init(RLEVEL l, mutex_t *m, const mutexattr_t *a) |
{ |
if (a->mclass == NPP_MCLASS || a->mclass == (NPP_MCLASS | l) ) |
return 0; |
else |
if (a->mclass != NPP_MCLASS) |
return -1; |
} |
static int NPP_init(RLEVEL l, mutex_t *m, const mutexattr_t *a) |
{ |
m->mutexlevel = l; |
m->opt = (void *)NIL; |
187,20 → 178,11 |
resource_table[l] = (resource_des *)m; |
/* fill the resource_des descriptor */ |
strncpy(m->m.r.res_name, NPP_MODULENAME, MAX_MODULENAME); |
m->m.r.res_code = NPP_MODULE_CODE; |
m->m.r.res_version = NPP_MODULE_VERSION; |
m->m.r.rtype = MUTEX_RTYPE; |
m->m.r.resource_status = NPP_resource_status; |
m->m.r.level_accept_resource_model = NPP_level_accept_resource_model; |
m->m.r.res_register = NPP_res_register; |
m->m.r.res_detach = NPP_res_detach; |
/* fill the mutex_resource_des descriptor */ |
m->m.level_accept_mutexattr = NPP_level_accept_mutexattr; |
m->m.init = NPP_init; |
m->m.destroy = NPP_destroy; |
m->m.lock = NPP_lock; |
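NPP needs no per-mutex queue because locking simply makes the running task non-preemptive: the module keeps one counter of held NPP mutexes (the nlocked field printed in the #if 0 block above), raising preemption protection on the first lock and dropping it on the last unlock. A conceptual sketch under that reading (toy code, not the S.Ha.R.K. implementation):

#include <stdio.h>

static int nlocked;                 /* NPP mutexes currently held */

static void preempt_disable(void) { printf("preemption off\n"); }
static void preempt_enable(void)  { printf("preemption on\n");  }

static void npp_lock(void)
{
    if (nlocked++ == 0)
        preempt_disable();          /* first lock: run non-preemptively */
}

static void npp_unlock(void)
{
    if (--nlocked == 0)
        preempt_enable();           /* last unlock: normal scheduling */
}

int main(void)
{
    npp_lock();     /* preemption off            */
    npp_lock();     /* nested: no state change   */
    npp_unlock();
    npp_unlock();   /* preemption back on        */
    return 0;
}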
/shark/tags/rel_0_4/kernel/modules/hartport.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: hartport.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: hartport.c,v 1.3 2002-11-11 08:32:06 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2002-11-11 08:32:06 $ |
------------ |
This file contains the Hartik 3.3.1 Port functions |
110,8 → 110,8 |
struct hash_port htable[MAX_HASH_ENTRY]; |
struct port_ker port_des[MAX_PORT]; |
struct port_com port_int[MAX_PORT_INT]; |
QUEUE freeportdes; |
QUEUE freeportint; |
int freeportdes; |
int freeportint; |
static int port_installed = 0; |
548,7 → 548,7 |
return -1; |
} |
if (!pd->valid) { |
errno = EPORT_UNVALID_DESCR; |
errno = EPORT_INVALID_DESCR; |
return -1; |
} |
596,7 → 596,7 |
return -1; |
} |
if (!pd->valid) { |
errno = EPORT_UNVALID_DESCR; |
errno = EPORT_INVALID_DESCR; |
return -1; |
} |
#endif |
/shark/tags/rel_0_4/kernel/modules/trcudp.c |
---|
6,7 → 6,9 |
* Paolo Gai <pj@gandalf.sssup.it> |
* |
* Authors : |
* Paolo Gai <pj@gandalf.sssup.it> |
* Massimiliano Giorgi <massy@gandalf.sssup.it> |
* Luca Abeni <luca@gandalf.sssup.it> |
* (see the web pages for full authors list) |
* |
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy) |
16,6 → 18,26 |
* http://shark.sssup.it |
*/ |
/* |
* Copyright (C) 2002 Paolo Gai |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
* |
* CVS : $Id: trcudp.c,v 1.3 2002-10-28 10:11:38 pj Exp $ |
*/ |
#include <ll/sys/types.h> |
#include <ll/stdlib.h> |
23,73 → 45,217 |
#include <kernel/mem.h> |
#include <kernel/log.h> |
#include <drivers/udpip.h> |
#include <trace/types.h> |
#include <trace/trace.h> |
#include <trace/queues.h> |
#include <fs/fs.h> |
//#define DEBUG_TRCUDP |
#include <unistd.h> |
#include <fcntl.h> |
#include <limits.h> |
#define TRCUDP_MAXEVENTS (1500/sizeof(trc_event_t)) |
//#define TRCUDP_MAXEVENTS 10 |
typedef struct TAGudp_queue_t { |
UDP_ADDR addr; |
trc_event_t evt; |
} udp_queue_t; |
/* Well... this file is very similar to trccirc.c! */ |
static trc_event_t *udp_get(udp_queue_t *queue) |
typedef struct TAGtrcudp_queue_t { |
/*+ size of the queue +*/ |
int size; |
/*+ index of the next insertion into the queue +*/ |
int index; |
/*+ index of the next item to write (if online_tracer activated) +*/ |
int windex; |
/*+ number of events lost (if online_tracer activated) +*/ |
long hoops; |
/*+ local and remote IP numbers +*/ |
UDP_ADDR local, remote; |
/*+ unique number that identify the queue +*/ |
int uniq; |
/*+ =1 when the system shuts down +*/ |
int mustgodown; |
TASK_MODEL *m; |
/*+ dummy, needed for creating a valid packet (dirty trick ;-) +*/ |
short int dummy; |
/*+ events table +*/ |
trc_event_t table[0]; |
} trcudp_queue_t; |
static TASK online_tracer(trcudp_queue_t *queue) |
{ |
return &queue->evt; |
int s; /* the socket */ |
int newwindex; /* new write index after sending the packet */ |
int n; /* number of packets to send */ |
short int *pkt; |
s = udp_bind(&queue->local, NULL); |
for (;;) { |
if (queue->index<queue->windex) { |
if (queue->windex+TRCUDP_MAXEVENTS < queue->size) { |
newwindex = queue->windex+TRCUDP_MAXEVENTS; |
n = TRCUDP_MAXEVENTS; |
} else { |
newwindex = 0; |
n = queue->size-queue->windex; |
} |
} else { |
if (queue->windex+TRCUDP_MAXEVENTS < queue->index) { |
newwindex = queue->windex+TRCUDP_MAXEVENTS; |
n = TRCUDP_MAXEVENTS; |
} else { |
newwindex = queue->index; |
n = queue->index-queue->windex; |
} |
} |
if (n) { |
/* store the number of events in the UDP packet. This works |
because the event entry before windex is always empty, or |
because we use the dummy field in the struct */ |
pkt = ((short int *)(queue->table+queue->windex))-1; |
*pkt = (short int)n; |
udp_sendto(s,(char *)pkt, |
n*sizeof(trc_event_t)+2,&queue->remote); |
#ifdef DEBUG_TRCUDP |
printk(KERN_DEBUG "UDP: SEND %d events," |
" index %d windex %d new %d!!!\n",n, |
queue->index, queue->windex, newwindex); |
#endif |
queue->windex = newwindex; |
} |
if (queue->mustgodown) { |
if (queue->windex == queue->index) |
break; |
} |
else |
task_endcycle(); |
} |
return NULL; |
} |
static int udp_post(udp_queue_t *queue) |
static trc_event_t *trcudp_get(trcudp_queue_t *queue) |
{ |
//int s=0; |
/* s ??? */ |
//udp_sendto(s,&queue->evt,sizeof(trc_event_t),&queue->addr); |
if (queue->mustgodown) |
return NULL; |
if (queue->index==queue->size-1) { |
if (queue->windex==0) { |
queue->hoops++; |
return NULL; |
} |
queue->index=0; |
return &queue->table[queue->size-1]; |
} |
if (queue->index+1==queue->windex) { |
queue->hoops++; |
return NULL; |
} |
return &queue->table[queue->index++]; |
} |
static int trcudp_post(trcudp_queue_t *queue) |
{ |
return 0; |
} |
static int udp_create(trc_queue_t *queue, TRC_UDP_PARMS *args) |
static void trcudp_shutdown(trcudp_queue_t *queue); |
static int trcudp_create(trc_queue_t *p, TRC_UDP_PARMS *args) |
{ |
udp_queue_t *ptr; |
trcudp_queue_t *queue; |
if (args==NULL) return -1; |
if (args==NULL) { |
printk(KERN_ERR "trcudp_create: you must specify a non-NULL parameter!"); |
return -1; |
} |
ptr=(udp_queue_t*)kern_alloc(sizeof(udp_queue_t)); |
if (ptr==NULL) return -1; |
queue->get=(trc_event_t*(*)(void*))udp_get; |
queue->post=(int(*)(void*))udp_post; |
queue->data=ptr; |
queue=(trcudp_queue_t*)kern_alloc(sizeof(trcudp_queue_t)+ |
sizeof(trc_event_t)*args->size); |
if (queue==NULL) { |
printk(KERN_ERR "trcudp_create: error during memory allocation!"); |
return -1; |
} |
memcpy(&ptr->addr,&args->addr,sizeof(UDP_ADDR)); |
p->get=(trc_event_t*(*)(void*))trcudp_get; |
p->post=(int(*)(void*))trcudp_post; |
p->data=queue; |
queue->size=args->size; |
queue->windex=queue->index=0; |
queue->hoops=0; |
queue->local=args->local; |
queue->remote=args->remote; |
/* uniq initialized in trcudp_activate */ |
queue->mustgodown=0; |
queue->m = args->model; |
/* dummy unused */ |
/* run AFTER exit, so that with luck we are back in text mode... */ |
sys_atrunlevel((void (*)(void *))trcudp_shutdown, (void *)queue, RUNLEVEL_AFTER_EXIT); |
return 0; |
} |
static int udp_activate(udp_queue_t *queue) |
static int trcudp_activate(trcudp_queue_t *queue, int uniq) |
{ |
SOFT_TASK_MODEL model; |
TASK_MODEL *m; |
PID pid; |
queue->uniq=uniq; |
if (!queue->m) { |
soft_task_default_model(model); |
soft_task_def_system(model); |
/* soft_task_def_notrace(model); Should we trace the tracer? */ |
soft_task_def_periodic(model); |
soft_task_def_period(model,250000); |
soft_task_def_met(model,10000); |
soft_task_def_wcet(model,10000); |
/* soft_task_def_nokill(model); NOOOOOOO!!!! */ |
soft_task_def_arg(model,queue); |
m = (TASK_MODEL *)&model; |
} |
else { |
m = queue->m; |
task_def_arg(*m,queue); |
} |
pid=task_create("trcUDP",online_tracer,m,NULL); |
if (pid==-1) { |
printk(KERN_ERR "can't start tracer online trcudp trace task"); |
} else |
task_activate(pid); |
return 0; |
} |
static int udp_terminate(udp_queue_t *queue) |
static int trcudp_terminate(trcudp_queue_t *queue) |
{ |
queue->mustgodown = 1; |
return 0; |
} |
static void trcudp_shutdown(trcudp_queue_t *queue) |
{ |
printk(KERN_NOTICE "tracer: %li events lost into UDP queue %d", |
queue->hoops, queue->uniq); |
} |
int trc_register_udp_queue(void) |
{ |
int res; |
res=trc_register_queuetype(TRC_UDP_QUEUE, |
(int(*)(trc_queue_t*,void*))udp_create, |
(int(*)(void*))udp_activate, |
(int(*)(void*))udp_terminate |
); |
if (res!=0) printk(KERN_WARNING "can't register tracer udp queue"); |
(int(*)(trc_queue_t*,void*))trcudp_create, |
(int(*)(void*,int))trcudp_activate, |
(int(*)(void*))trcudp_terminate |
); |
if (res!=0) printk(KERN_WARNING "can't register tracer trcudp queue"); |
return res; |
} |
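The heart of trcudp.c is the pair trcudp_get()/online_tracer(): the writer index (index) and the sender index (windex) chase each other around a circular table, events are dropped (and counted in hoops) whenever the writer would overrun the sender, and the tracer task ships at most TRCUDP_MAXEVENTS per packet, always as one contiguous chunk so udp_sendto() can read straight out of the array. A sketch of that chunking rule (toy code; TRCUDP_MAXEVENTS and the two indices come from the listing above, everything else is a stand-in):

#include <stdio.h>

#define MAXEVENTS 64   /* stands in for TRCUDP_MAXEVENTS */

/* Given the table size and the current indices, return the number of
   events to send and the write index after the send. */
static int next_chunk(int size, int index, int windex, int *newwindex)
{
    int n;
    if (index < windex) {                  /* unread data wraps past the end */
        if (windex + MAXEVENTS < size) {
            n = MAXEVENTS;
            *newwindex = windex + MAXEVENTS;
        } else {                           /* send only up to the wrap point */
            n = size - windex;
            *newwindex = 0;
        }
    } else {                               /* unread data is contiguous */
        if (windex + MAXEVENTS < index) {
            n = MAXEVENTS;
            *newwindex = windex + MAXEVENTS;
        } else {
            n = index - windex;
            *newwindex = index;
        }
    }
    return n;
}

int main(void)
{
    int nw, n = next_chunk(1000, 100, 950, &nw);
    printf("send %d events, windex 950 -> %d\n", n, nw); /* 50, -> 0 */
    return 0;
}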
/shark/tags/rel_0_4/kernel/modules/cabs.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: cabs.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: cabs.c,v 1.2 2002-10-28 07:55:54 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2002-10-28 07:55:54 $ |
------------ |
Date: 2/7/96 |
95,7 → 95,7 |
static int checkcab(CAB id) |
{ |
if (id >= MAX_CAB) { |
errno = ECAB_UNVALID_ID; |
errno = ECAB_INVALID_ID; |
return -1; |
} |
if (cabs[id].busy == TRUE) return TRUE; |
117,7 → 117,7 |
} |
cabs[MAX_CAB-1].next_cab_free = NIL; |
cabs[MAX_CAB-1].busy = FALSE; |
// for (i = CAB_UNVALID_MSG_NUM; i <= CAB_CLOSED; i++) |
// for (i = CAB_INVALID_MSG_NUM; i <= CAB_CLOSED; i++) |
// exc_set(i,cab_exception); |
} |
139,7 → 139,7 |
/* raise the exception */ |
if (num_mes < 1) { |
errno = ECAB_UNVALID_MSG_NUM; |
errno = ECAB_INVALID_MSG_NUM; |
kern_frestore(f); |
return -1; |
} |
/shark/tags/rel_0_4/kernel/modules/trcdfix.c |
---|
0,0 → 1,152 |
/* |
* Project: S.Ha.R.K. |
* |
* Coordinators: |
* Giorgio Buttazzo <giorgio@sssup.it> |
* Paolo Gai <pj@gandalf.sssup.it> |
* |
* Authors : |
* Massimiliano Giorgi <massy@gandalf.sssup.it> |
* (see the web pages for full authors list) |
* |
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy) |
* |
* http://www.sssup.it |
* http://retis.sssup.it |
* http://shark.sssup.it |
*/ |
#include <ll/sys/types.h> |
#include <ll/stdlib.h> |
#include <kernel/func.h> |
#include <kernel/mem.h> |
#include <kernel/log.h> |
#include <trace/types.h> |
#include <trace/trace.h> |
#include <trace/queues.h> |
#include <ll/i386/x-dos.h> |
/* this file implements a fixed queue, that is, simply an array that |
is filled with events until it is full. After that, all the other |
events are discarded. It uses the DOSFS filesystem to write all the data. |
This file is derived from trcfixed.c; I used a separate file |
because including trcfixed.c in the executable would have implied |
linking in the whole filesystem... |
*/ |
typedef struct TAGfixed_queue_t { |
int size; |
int index; |
char *filename; |
int uniq; |
trc_event_t table[0]; |
/* Yes, 0!... the elements are allocated |
in a dirty way by the kern_alloc call in dosfs_fixed_create */ |
} dosfs_fixed_queue_t; |
/* This function simply returns an event to fill (only if the fixed table |
is not yet full) */ |
static trc_event_t *dosfs_fixed_get(dosfs_fixed_queue_t *queue) |
{ |
if (queue->index>=queue->size) return NULL; |
return &queue->table[queue->index++]; |
} |
/* since get returns the correct event address, |
the post function does nothing... */ |
static int dosfs_fixed_post(dosfs_fixed_queue_t *queue) |
{ |
return 0; |
} |
static TRC_FIXED_PARMS defaultargs; |
static int once=0; |
static void dosfs_fixed_flush(void *arg); |
static int dosfs_fixed_create(trc_queue_t *queue, TRC_FIXED_PARMS *args) |
{ |
dosfs_fixed_queue_t *ptr; |
/* initialize the default arguments for the fixed queue */ |
if (!once) { |
/* well... this func is called when the system is not running! */ |
once=1; |
trc_fixed_default_parms(defaultargs); |
} |
if (args==NULL) args=&defaultargs; |
/* allocate the fixed queue data structure plus the array of events */ |
ptr=(dosfs_fixed_queue_t*)kern_alloc(sizeof(dosfs_fixed_queue_t)+ |
sizeof(trc_event_t)*(args->size+1)); |
if (ptr==NULL) return -1; |
/* set the current queue pointers and data */ |
queue->get=(trc_event_t*(*)(void*))dosfs_fixed_get; |
queue->post=(int(*)(void*))dosfs_fixed_post; |
queue->data=ptr; |
ptr->size=args->size; |
ptr->index=0; |
ptr->filename=args->filename; |
/* prepare for shutdown ;-) */ |
sys_atrunlevel(dosfs_fixed_flush, (void *)ptr, RUNLEVEL_AFTER_EXIT); |
return 0; |
} |
static void dosfs_fixed_flush(void *arg) |
{ |
DOS_FILE *f; |
dosfs_fixed_queue_t *queue = (dosfs_fixed_queue_t *)arg; |
char pathname[100]; /* it should be PATH_MAX, but we do not use the |
filesystem, so the symbol is not defined */ |
if (queue->filename==NULL) trc_create_name("fix",queue->uniq,pathname); |
else trc_create_name(queue->filename,0,pathname); |
printk(KERN_DEBUG "tracer flush index= %d pathname=%s\n", |
queue->index, pathname); |
f = DOS_fopen(pathname,"w"); |
DOS_fwrite(queue->table,1,queue->index*sizeof(trc_event_t),f); |
DOS_fclose(f); |
} |
static int dosfs_fixed_activate(dosfs_fixed_queue_t *queue, int uniq) |
{ |
queue->uniq=uniq; |
return 0; |
} |
static int dosfs_fixed_terminate(dosfs_fixed_queue_t *queue) |
{ |
return 0; |
} |
int trc_register_dosfs_fixed_queue(void) |
{ |
int res; |
res=trc_register_queuetype(TRC_DOSFS_FIXED_QUEUE, |
(int(*)(trc_queue_t*,void*))dosfs_fixed_create, |
(int(*)(void*,int))dosfs_fixed_activate, |
(int(*)(void*))dosfs_fixed_terminate |
); |
if (res!=0) printk(KERN_WARNING "can't register tracer DOSFS fixed queue"); |
return res; |
} |
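trcdfix.c's queue is intentionally simpler than the UDP one: a fixed array fills up once, later events are silently dropped, and everything collected is written in a single sequential burst at shutdown, when disturbing the timing no longer matters. A self-contained sketch of that fill-once/flush-at-exit pattern (toy types; the C99 flexible array member mirrors the table[0] trick in the listing):

#include <stdio.h>
#include <stdlib.h>

typedef struct { int type; long when; } toy_event_t;

typedef struct {
    int size, index;
    toy_event_t table[];          /* flexible array member */
} toy_fixed_queue_t;

static toy_fixed_queue_t *toy_create(int size)
{
    toy_fixed_queue_t *q = malloc(sizeof *q + size * sizeof(toy_event_t));
    if (q) { q->size = size; q->index = 0; }
    return q;
}

/* NULL once the table is full: the caller just drops the event */
static toy_event_t *toy_get(toy_fixed_queue_t *q)
{
    return (q->index < q->size) ? &q->table[q->index++] : NULL;
}

/* at shutdown, one sequential write of everything collected */
static void toy_flush(toy_fixed_queue_t *q, const char *path)
{
    FILE *f = fopen(path, "wb");
    if (f) {
        fwrite(q->table, sizeof(toy_event_t), q->index, f);
        fclose(f);
    }
}

int main(void)
{
    toy_fixed_queue_t *q = toy_create(2);
    for (int i = 0; i < 3; i++) {
        toy_event_t *e = toy_get(q);    /* third call returns NULL */
        if (e) { e->type = i; e->when = 0; }
    }
    toy_flush(q, "trace.bin");
    printf("kept %d of 3 events\n", q->index); /* kept 2 of 3 */
    free(q);
    return 0;
}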