/shark/tags/rel_0_3/kernel/sleep.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/endcycle.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/kernold.s |
---|
File deleted |
/shark/tags/rel_0_3/kernel/oldmem.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/init/makefile |
---|
File deleted |
/shark/tags/rel_0_3/kernel/init/initfs.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/init/initblk.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/init/rm1.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/init/h3pi.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/init/hartik3.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/init/h3pips.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/init/h3piss.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/init/initg.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/init/pinit.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/init/init1.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/init/init2.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/init/init3.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/init/init4.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/init/init5.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/init/init6.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/create.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/delay.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/newmem.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/exchtxt.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/status.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/qqueue.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/queue.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/exchgrx.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/modules/edf2.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/modules/old/trace.c |
---|
File deleted |
/shark/tags/rel_0_3/kernel/modules/edf.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: edf.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: edf.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the scheduling module EDF (Earliest Deadline First) |
34,7 → 34,7 |
**/ |
/* |
* Copyright (C) 2000 Paolo Gai |
* Copyright (C) 2000,2002 Paolo Gai |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
62,12 → 62,11 |
#include <kernel/func.h> |
#include <kernel/trace.h> |
//#define edf_printf kern_printf |
#define edf_printf printk |
//#define EDFDEBUG |
#define edf_printf kern_printf |
/*+ Status used in the level +*/ |
#define EDF_READY MODULE_STATUS_BASE /*+ - Ready status +*/ |
#define EDF_DELAY MODULE_STATUS_BASE+1 /*+ - Delay status +*/ |
#define EDF_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/ |
#define EDF_WAIT MODULE_STATUS_BASE+3 /*+ to wait the deadline +*/ |
#define EDF_IDLE MODULE_STATUS_BASE+4 /*+ to wait the deadline +*/ |
90,7 → 89,7 |
/*+ used to manage the JOB_TASK_MODEL and the |
periodicity +*/ |
QUEUE ready; /*+ the ready queue +*/ |
IQUEUE ready; /*+ the ready queue +*/ |
int flags; /*+ the init flags... +*/ |
99,28 → 98,15 |
} EDF_level_des; |
static char *EDF_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case EDF_READY : return "EDF_Ready"; |
case EDF_DELAY : return "EDF_Delay"; |
case EDF_WCET_VIOLATED: return "EDF_Wcet_Violated"; |
case EDF_WAIT : return "EDF_Sporadic_Wait"; |
case EDF_IDLE : return "EDF_Idle"; |
case EDF_ZOMBIE : return "EDF_Zombie"; |
default : return "EDF_Unknown"; |
} |
} |
static void EDF_timer_deadline(void *par) |
{ |
PID p = (PID) par; |
EDF_level_des *lev; |
struct timespec *temp; |
#ifdef EDFDEBUG |
edf_printf("$"); |
#endif |
lev = (EDF_level_des *)level_table[proc_table[p].task_level]; |
128,7 → 114,7 |
case EDF_ZOMBIE: |
/* we finally put the task in the ready queue */ |
proc_table[p].status = FREE; |
q_insertfirst(p,&freedesc); |
iq_insertfirst(p,&freedesc); |
/* and free the allocated bandwidth */ |
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet; |
break; |
137,18 → 123,17 |
/* tracer stuff */ |
trc_logevent(TRC_INTACTIVATION,&p); |
/* similar to EDF_task_activate */ |
TIMESPEC_ASSIGN(&proc_table[p].request_time, |
&proc_table[p].timespec_priority); |
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority); |
temp = iq_query_timespec(p,&lev->ready); |
ADDUSEC2TIMESPEC(lev->period[p], temp); |
proc_table[p].status = EDF_READY; |
q_timespec_insert(p,&lev->ready); |
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority, |
iq_timespec_insert(p,&lev->ready); |
lev->deadline_timer[p] = kern_event_post(temp, |
EDF_timer_deadline, |
(void *)p); |
edf_printf("(dline p%d ev%d %d.%d)",(int)p,(int)lev->deadline_timer[p],(int)proc_table[p].timespec_priority.tv_sec,(int)proc_table[p].timespec_priority.tv_nsec/1000); |
//printk("(d%d idle priority set to %d)",p,proc_table[p].priority ); |
#ifdef EDFDEBUG |
edf_printf("(dline p%d ev%d %d.%d)",(int)p,(int)lev->deadline_timer[p],(int)temp->tv_sec,(int)temp->tv_nsec/1000); |
#endif |
event_need_reschedule(); |
printk("el%d|",p); |
break; |
case EDF_WAIT: |
158,8 → 143,10 |
default: |
/* else, a deadline miss occurred!!! */ |
#ifdef EDFDEBUG |
edf_printf("\nstatus %d\n", (int)proc_table[p].status); |
edf_printf("timer_deadline:AAARRRGGGHHH!!!"); |
#endif |
kern_raise(XDEADLINE_MISS,p); |
} |
} |
168,116 → 155,26 |
{ |
PID p = (PID) par; |
#ifdef EDFDEBUG |
edf_printf("AAARRRGGGHHH!!!"); |
#endif |
kern_raise(XDEADLINE_MISS,p); |
} |
/*+ this function is called when a task finish his delay +*/ |
static void EDF_timer_delay(void *par) |
/* The scheduler only gets the first task in the queue */ |
static PID EDF_public_scheduler(LEVEL l) |
{ |
PID p = (PID) par; |
EDF_level_des *lev; |
lev = (EDF_level_des *)level_table[proc_table[p].task_level]; |
proc_table[p].status = EDF_READY; |
q_timespec_insert(p,&lev->ready); |
proc_table[p].delay_timer = NIL; /* Paranoia */ |
event_need_reschedule(); |
} |
static int EDF_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l)) { |
HARD_TASK_MODEL *h = (HARD_TASK_MODEL *)m; |
if (h->wcet && h->mit) |
return 0; |
} |
return -1; |
} |
static int EDF_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == JOB_PCLASS || m->pclass == (JOB_PCLASS | l)) |
return 0; |
else |
return -1; |
} |
static char *onoff(int i) |
{ |
if (i) |
return "On "; |
else |
return "Off"; |
} |
static void EDF_level_status(LEVEL l) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
PID p = lev->ready; |
kern_printf("Wcet Check : %s\n", |
onoff(lev->flags & EDF_ENABLE_WCET_CHECK)); |
kern_printf("On-line guarantee : %s\n", |
onoff(lev->flags & EDF_ENABLE_GUARANTEE)); |
kern_printf("Used Bandwidth : %u/%u\n", |
lev->U, MAX_BANDWIDTH); |
#ifdef EDFDEBUG |
edf_printf("(s%d)", iq_query_first(&lev->ready)); |
#endif |
while (p != NIL) { |
if ((proc_table[p].pclass) == JOB_PCLASS) |
kern_printf("Pid: %2d (GUEST)\n", p); |
else |
kern_printf("Pid: %2d Name: %10s %s: %9ld Dline: %9ld.%6ld Stat: %s\n", |
p, |
proc_table[p].name, |
lev->flag[p] & EDF_FLAG_SPORADIC ? "MinITime" : "Period ", |
lev->period[p], |
proc_table[p].timespec_priority.tv_sec, |
proc_table[p].timespec_priority.tv_nsec/1000, |
EDF_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
} |
for (p=0; p<MAX_PROC; p++) |
if (proc_table[p].task_level == l && proc_table[p].status != EDF_READY |
&& proc_table[p].status != FREE ) |
kern_printf("Pid: %2d Name: %10s %s: %9ld Dline: %9ld.%6ld Stat: %s\n", |
p, |
proc_table[p].name, |
lev->flag[p] & EDF_FLAG_SPORADIC ? "MinITime" : "Period ", |
lev->period[p], |
proc_table[p].timespec_priority.tv_sec, |
proc_table[p].timespec_priority.tv_nsec/1000, |
EDF_status_to_a(proc_table[p].status)); |
return iq_query_first(&lev->ready); |
} |
/* The scheduler only gets the first task in the queue */ |
static PID EDF_level_scheduler(LEVEL l) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
/* { // print 4 dbg the ready queue |
PID p= lev->ready; |
kern_printf("(s"); |
while (p != NIL) { |
kern_printf("%d ",p); |
p = proc_table[p].next; |
} |
kern_printf(") "); |
} |
*/ |
return (PID)lev->ready; |
} |
/* The on-line guarantee is enabled only if the appropriate flag is set... */ |
static int EDF_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int EDF_public_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
295,14 → 192,20 |
} |
static int EDF_task_create(LEVEL l, PID p, TASK_MODEL *m) |
static int EDF_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
HARD_TASK_MODEL *h; |
/* if the EDF_task_create is called, then the pclass must be a |
valid pclass. */ |
if (m->pclass != HARD_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
h = (HARD_TASK_MODEL *)m; |
if (!h->wcet || !h->mit) return -1; |
/* now we know that m is a valid model */ |
HARD_TASK_MODEL *h = (HARD_TASK_MODEL *)m; |
#ifdef EDFDEBUG |
edf_printf("(cr%d)", p); |
#endif |
lev->period[p] = h->mit; |
346,7 → 249,7 |
return 0; /* OK, also if the task cannot be guaranteed... */ |
} |
static void EDF_task_detach(LEVEL l, PID p) |
static void EDF_public_detach(LEVEL l, PID p) |
{ |
/* the EDF level doesn't introduce any dinamic allocated new field. |
we have only to reset the NO_GUARANTEE FIELD and decrement the allocated |
354,6 → 257,10 |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
#ifdef EDFDEBUG |
edf_printf("(det%d)", p); |
#endif |
if (lev->flags & EDF_FAILED_GUARANTEE) |
lev->flags &= ~EDF_FAILED_GUARANTEE; |
else |
360,46 → 267,27 |
lev->U -= (MAX_BANDWIDTH / lev->period[p]) * proc_table[p].wcet; |
} |
static int EDF_task_eligible(LEVEL l, PID p) |
static void EDF_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void EDF_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
#ifdef EDFDEBUG |
edf_printf("(disp p%d %d.%d)",(int)p,(int)schedule_time.tv_sec,(int)schedule_time.tv_nsec/1000); |
#endif |
/* the task state is set EXE by the scheduler() |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
q_extract(p, &lev->ready); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
iq_extract(p, &lev->ready); |
} |
static void EDF_task_epilogue(LEVEL l, PID p) |
static void EDF_public_epilogue(LEVEL l, PID p) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
#ifdef EDFDEBUG |
edf_printf("(epil p%d %d.%d)",p,(int)schedule_time.tv_sec,(int)schedule_time.tv_nsec/1000); |
#endif |
/* check if the wcet is finished... */ |
if ((lev->flags & EDF_ENABLE_WCET_CHECK) && proc_table[p].avail_time <= 0) { |
409,15 → 297,20 |
} |
else { |
/* the task has been preempted. it returns into the ready queue... */ |
q_timespec_insert(p,&lev->ready); |
iq_timespec_insert(p,&lev->ready); |
proc_table[p].status = EDF_READY; |
} |
} |
static void EDF_task_activate(LEVEL l, PID p) |
static void EDF_public_activate(LEVEL l, PID p) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
struct timespec *temp; |
#ifdef EDFDEBUG |
edf_printf("(act%d)", p); |
#endif |
if (proc_table[p].status == EDF_WAIT) { |
kern_raise(XACTIVATION,p); |
return; |
431,36 → 324,36 |
/* see also EDF_timer_deadline */ |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
temp = iq_query_timespec(p, &lev->ready); |
kern_gettime(temp); |
ADDUSEC2TIMESPEC(lev->period[p], temp); |
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, |
&proc_table[p].request_time); |
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority); |
/* Insert task in the correct position */ |
proc_table[p].status = EDF_READY; |
q_timespec_insert(p,&lev->ready); |
iq_timespec_insert(p,&lev->ready); |
/* Set the deadline timer */ |
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority, |
lev->deadline_timer[p] = kern_event_post(temp, |
EDF_timer_deadline, |
(void *)p); |
edf_printf("(dline p%d ev%d %d.%d)",p,(int)lev->deadline_timer[p],(int)proc_table[p].timespec_priority.tv_sec,(int)proc_table[p].timespec_priority.tv_nsec/1000); |
#ifdef EDFDEBUG |
edf_printf("(dline p%d ev%d %d.%d)",p,(int)lev->deadline_timer[p],(int)temp->tv_sec,(int)temp->tv_nsec/1000); |
#endif |
} |
static void EDF_task_insert(LEVEL l, PID p) |
static void EDF_public_unblock(LEVEL l, PID p) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
/* Similar to EDF_task_activate, but we don't check in what state |
the task is and we don't set the request_time*/ |
/* Similar to EDF_task_activate, |
but we don't check in what state the task is */ |
/* Insert task in the coEDFect position */ |
proc_table[p].status = EDF_READY; |
q_timespec_insert(p,&lev->ready); |
iq_timespec_insert(p,&lev->ready); |
} |
static void EDF_task_extract(LEVEL l, PID p) |
static void EDF_public_block(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extract it from the ready queue at the dispatch time. |
473,30 → 366,35 |
*/ |
} |
static void EDF_task_endcycle(LEVEL l, PID p) |
static int EDF_public_message(LEVEL l, PID p, void *m) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
#ifdef EDFDEBUG |
edf_printf("(ecyc p%d %d.%d)",p,(int)schedule_time.tv_sec,(int)schedule_time.tv_nsec/1000); |
#endif |
/* the task has terminated his job before it consume the wcet. All OK! */ |
if (lev->flag[p] & EDF_FLAG_SPORADIC) |
if (!lev->flag[p] & EDF_FLAG_SPORADIC) |
proc_table[p].status = EDF_IDLE; |
else |
proc_table[p].status = EDF_WAIT; |
else /* pclass = sporadic_pclass */ |
proc_table[p].status = EDF_IDLE; |
/* we reset the capacity counters... */ |
if (lev->flags & EDF_ENABLE_WCET_CHECK) |
proc_table[p].avail_time = proc_table[p].wcet; |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
/* when the deadline timer fire, it recognize the situation and set |
correctly all the stuffs (like reactivation, request_time, etc... ) */ |
correctly all the stuffs (like reactivation, sleep, etc... ) */ |
return 0; |
} |
static void EDF_task_end(LEVEL l, PID p) |
static void EDF_public_end(LEVEL l, PID p) |
{ |
// EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
proc_table[p].status = EDF_ZOMBIE; |
/* When the deadline timer fire, it put the task descriptor in |
503,182 → 401,81 |
the free queue, and free the allocated bandwidth... */ |
} |
static void EDF_task_sleep(LEVEL l, PID p) |
static void EDF_private_insert(LEVEL l, PID p, TASK_MODEL *m) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
JOB_TASK_MODEL *job; |
/* the task has terminated his job before it consume the wcet. All OK! */ |
proc_table[p].status = EDF_WAIT; |
if (m->pclass != JOB_PCLASS || (m->level != 0 && m->level != l) ) { |
kern_raise(XINVALID_TASK, p); |
return; |
} |
/* we reset the capacity counters... */ |
if (lev->flags & EDF_ENABLE_WCET_CHECK) |
proc_table[p].avail_time = proc_table[p].wcet; |
job = (JOB_TASK_MODEL *)m; |
/* when the deadline timer fire, it recognize the situation and set |
correctly the task state to sleep... */ |
} |
static void EDF_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
struct timespec wakeuptime; |
// EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
/* equal to EDF_task_endcycle */ |
proc_table[p].status = EDF_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT, &wakeuptime); |
ADDUSEC2TIMESPEC(usdelay, &wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
EDF_timer_delay, |
(void *)p); |
} |
/* Guest Functions |
These functions manages a JOB_TASK_MODEL, that is used to put |
a guest task in the EDF ready queue. */ |
static int EDF_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
JOB_TASK_MODEL *job = (JOB_TASK_MODEL *)m; |
/* if the EDF_guest_create is called, then the pclass must be a |
valid pclass. */ |
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, &job->deadline); |
/* Insert task in the correct position */ |
*iq_query_timespec(p, &lev->ready) = job->deadline; |
iq_timespec_insert(p,&lev->ready); |
proc_table[p].status = EDF_READY; |
lev->deadline_timer[p] = -1; |
if (job->noraiseexc) |
lev->period[p] = job->period; |
/* Set the deadline timer */ |
if (!(job->noraiseexc)) |
lev->flag[p] = EDF_FLAG_NORAISEEXC; |
else |
else { |
lev->flag[p] = 0; |
lev->period[p] = job->period; |
/* there is no bandwidth guarantee at this level, it is performed |
by the level that inserts guest tasks... */ |
return 0; /* OK, also if the task cannot be guaranteed... */ |
lev->deadline_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready), |
EDF_timer_guest_deadline, |
(void *)p); |
} |
} |
static void EDF_guest_detach(LEVEL l, PID p) |
static void EDF_private_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the EDF level doesn't introduce any dinamic allocated new field. |
No guarantee is performed on guest tasks... so we don't have to reset |
the NO_GUARANTEE FIELD */ |
} |
static void EDF_guest_dispatch(LEVEL l, PID p, int nostop) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
/* the task state is set to EXE by the scheduler() |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
q_extract(p, &lev->ready); |
iq_extract(p, &lev->ready); |
} |
static void EDF_guest_epilogue(LEVEL l, PID p) |
static void EDF_private_epilogue(LEVEL l, PID p) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
/* the task has been preempted. it returns into the ready queue... */ |
q_timespec_insert(p,&lev->ready); |
iq_timespec_insert(p,&lev->ready); |
proc_table[p].status = EDF_READY; |
} |
static void EDF_guest_activate(LEVEL l, PID p) |
static void EDF_private_extract(LEVEL l, PID p) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
/* Insert task in the correct position */ |
q_timespec_insert(p,&lev->ready); |
proc_table[p].status = EDF_READY; |
/* Set the deadline timer */ |
if (!(lev->flag[p] & EDF_FLAG_NORAISEEXC)) |
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority, |
EDF_timer_guest_deadline, |
(void *)p); |
} |
static void EDF_guest_insert(LEVEL l, PID p) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
/* Insert task in the correct position */ |
q_timespec_insert(p,&lev->ready); |
proc_table[p].status = EDF_READY; |
} |
static void EDF_guest_extract(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extract it from the ready queue at the dispatch time. |
. the state of the task is set by the calling function |
. the deadline must remain... |
So, we do nothing!!! |
*/ |
} |
static void EDF_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void EDF_guest_end(LEVEL l, PID p) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
//kern_printf("EDF_guest_end: dline timer %d\n",lev->deadline_timer[p]); |
#ifdef EDFDEBUG |
edf_printf("EDF_guest_end: dline timer %d\n",lev->deadline_timer[p]); |
#endif |
if (proc_table[p].status == EDF_READY) |
{ |
q_extract(p, &lev->ready); |
//kern_printf("(g_end rdy extr)"); |
} |
else if (proc_table[p].status == EDF_DELAY) { |
event_delete(proc_table[p].delay_timer); |
proc_table[p].delay_timer = NIL; /* paranoia */ |
} |
iq_extract(p, &lev->ready); |
/* we remove the deadline timer, because the slice is finished */ |
if (lev->deadline_timer[p] != NIL) { |
// kern_printf("EDF_guest_end: dline timer %d\n",lev->deadline_timer[p]); |
event_delete(lev->deadline_timer[p]); |
kern_event_delete(lev->deadline_timer[p]); |
lev->deadline_timer[p] = NIL; |
} |
} |
static void EDF_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void EDF_guest_delay(LEVEL l, PID p, TIME usdelay) |
{ |
struct timespec wakeuptime; |
// EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
/* equal to EDF_task_endcycle */ |
proc_table[p].status = EDF_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT, &wakeuptime); |
ADDUSEC2TIMESPEC(usdelay, &wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
EDF_timer_delay, |
(void *)p); |
} |
/* Registration functions */ |
/*+ Registration function: |
int flags the init flags ... see edf.h +*/ |
void EDF_register_level(int flags) |
LEVEL EDF_register_level(int flags) |
{ |
LEVEL l; /* the level that we register */ |
EDF_level_des *lev; /* for readableness only */ |
687,58 → 484,34 |
printk("EDF_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(EDF_level_des)); |
printk(" alloco descrittore %d %d\n",l,(int)sizeof(EDF_level_des)); |
lev = (EDF_level_des *)level_table[l]; |
/* alloc the space needed for the EDF_level_des */ |
lev = (EDF_level_des *)kern_alloc(sizeof(EDF_level_des)); |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, EDF_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = EDF_LEVEL_CODE; |
lev->l.level_version = EDF_LEVEL_VERSION; |
lev->l.private_insert = EDF_private_insert; |
lev->l.private_extract = EDF_private_extract; |
lev->l.private_dispatch = EDF_private_dispatch; |
lev->l.private_epilogue = EDF_private_epilogue; |
lev->l.level_accept_task_model = EDF_level_accept_task_model; |
lev->l.level_accept_guest_model = EDF_level_accept_guest_model; |
lev->l.level_status = EDF_level_status; |
lev->l.level_scheduler = EDF_level_scheduler; |
lev->l.public_scheduler = EDF_public_scheduler; |
if (flags & EDF_ENABLE_GUARANTEE) |
lev->l.level_guarantee = EDF_level_guarantee; |
lev->l.public_guarantee = EDF_public_guarantee; |
else |
lev->l.level_guarantee = NULL; |
lev->l.public_guarantee = NULL; |
lev->l.task_create = EDF_task_create; |
lev->l.task_detach = EDF_task_detach; |
lev->l.task_eligible = EDF_task_eligible; |
lev->l.task_dispatch = EDF_task_dispatch; |
lev->l.task_epilogue = EDF_task_epilogue; |
lev->l.task_activate = EDF_task_activate; |
lev->l.task_insert = EDF_task_insert; |
lev->l.task_extract = EDF_task_extract; |
lev->l.task_endcycle = EDF_task_endcycle; |
lev->l.task_end = EDF_task_end; |
lev->l.task_sleep = EDF_task_sleep; |
lev->l.task_delay = EDF_task_delay; |
lev->l.public_create = EDF_public_create; |
lev->l.public_detach = EDF_public_detach; |
lev->l.public_end = EDF_public_end; |
lev->l.public_dispatch = EDF_public_dispatch; |
lev->l.public_epilogue = EDF_public_epilogue; |
lev->l.public_activate = EDF_public_activate; |
lev->l.public_unblock = EDF_public_unblock; |
lev->l.public_block = EDF_public_block; |
lev->l.public_message = EDF_public_message; |
lev->l.guest_create = EDF_guest_create; |
lev->l.guest_detach = EDF_guest_detach; |
lev->l.guest_dispatch = EDF_guest_dispatch; |
lev->l.guest_epilogue = EDF_guest_epilogue; |
lev->l.guest_activate = EDF_guest_activate; |
lev->l.guest_insert = EDF_guest_insert; |
lev->l.guest_extract = EDF_guest_extract; |
lev->l.guest_endcycle = EDF_guest_endcycle; |
lev->l.guest_end = EDF_guest_end; |
lev->l.guest_sleep = EDF_guest_sleep; |
lev->l.guest_delay = EDF_guest_delay; |
/* fill the EDF descriptor part */ |
for(i=0; i<MAX_PROC; i++) { |
lev->period[i] = 0; |
746,18 → 519,17 |
lev->flag[i] = 0; |
} |
lev->ready = NIL; |
iq_init(&lev->ready, &freedesc, 0); |
lev->flags = flags & 0x07; |
lev->U = 0; |
return l; |
} |
bandwidth_t EDF_usedbandwidth(LEVEL l) |
{ |
EDF_level_des *lev = (EDF_level_des *)(level_table[l]); |
if (lev->l.level_code == EDF_LEVEL_CODE && |
lev->l.level_version == EDF_LEVEL_VERSION) |
return lev->U; |
else |
return 0; |
return lev->U; |
} |
/shark/tags/rel_0_3/kernel/modules/trace.c |
---|
38,11 → 38,11 |
*/ |
/* |
* CVS : $Id: trace.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
* CVS : $Id: trace.c,v 1.3 2003-01-07 17:07:51 pj Exp $ |
* |
* File: $File$ |
* Revision: $Revision: 1.1.1.1 $ |
* Last update: $Date: 2002-03-29 14:12:52 $ |
* Revision: $Revision: 1.3 $ |
* Last update: $Date: 2003-01-07 17:07:51 $ |
*/ |
#include <ll/sys/types.h> |
58,6 → 58,7 |
#include <bits/limits.h> |
/* maximum number of different queues where we want to log our events */ |
#define TRC_MAXQUEUES 5 |
/* |
64,8 → 65,11 |
* |
*/ |
/* this is the base path that is used as a prologue for all the |
filenames that are passed to the tracer */ |
static char basepath[PATH_MAX]; |
/* used to create the name for a tracer file */ |
void trc_create_name(char *basename, int uniq, char *pathname) |
{ |
if (uniq) sprintf(pathname,"%s/%s%i",basepath,basename,uniq); |
76,27 → 80,42 |
* |
*/ |
/* the flag used to discriminate if an event have to be traced or not */ |
#define FLAG_NOTRACE 0x01 |
typedef struct TAGtrc_evtinfo_t { |
trc_queue_t *queue; |
unsigned flags; |
trc_queue_t *queue; /* the queue responsible for the logging of an event */ |
unsigned flags; /* if = FLAG_NOTRACE the event must not be logged */ |
} trc_evtinfo_t; |
/* -- */ |
/* one entry for each event; this array says for each event the queue to use |
and if it must be logged */ |
trc_evtinfo_t eventstable[TRC_NUMEVENTS]; |
/* For each kind of queue (see include/tracer/queues.h) there is a set of |
pointers to the functions that a queue should implement */ |
int (*createqueue[TRC_QUEUETYPESNUMBER])(trc_queue_t *, void *); |
int (*activatequeue[TRC_QUEUETYPESNUMBER])(void *,int); |
int (*terminatequeue[TRC_QUEUETYPESNUMBER])(void *); |
/* for each queue registered in the system, |
the functions used to get/post an event |
The elements of this table are initialized with calls to createqueue[type]() |
(see include/trace/queues.h) */ |
trc_queue_t queuetable[TRC_MAXQUEUES]; |
/* initialized as a dummy queue, the default value of all the queues */ |
trc_queue_t queuesink; |
/* number of registered queues in the system */ |
int numqueues; |
/* -- */ |
/* The Dummy queue */ |
static trc_event_t *dummy_get(void *foo) |
{ |
return NULL; |
127,6 → 146,8 |
/* -- */ |
/* this function simply register the functions that are used to |
handle a queue */ |
int trc_register_queuetype(int queuetype, |
int(*creat)(trc_queue_t *, void *), |
int(*activate)(void *,int), |
139,6 → 160,11 |
return 0; |
} |
/* this function register a queue in the system. |
It uses the type to access to the queue handling functions registered |
with the previous function (trc_register_queuetype) |
numqueue is incremented! |
*/ |
int trc_create_queue(int queuetype, void *args) |
{ |
int res; |
186,20 → 212,28 |
printk(KERN_INFO "initializing tracer..."); |
/* all the queues are initialized to the dummy queue (sink!) */ |
for (i=0;i<TRC_QUEUETYPESNUMBER;i++) { |
createqueue[i]=dummy_createqueue; |
terminatequeue[i]=dummy_terminatequeue; |
} |
/* the sink queue is initialized */ |
dummy_createqueue(&queuesink,NULL); |
/* no queues registered yet */ |
numqueues=0; |
/* all the events are initialized to put to the sink queue */ |
for (i=0;i<TRC_NUMEVENTS;i++) { |
eventstable[i].queue=&queuesink; |
eventstable[i].flags=FLAG_NOTRACE; |
} |
/* this will end the tracer at shutdown */ |
i=sys_atrunlevel(trc_end,NULL,RUNLEVEL_SHUTDOWN); |
/* initialize the parameters if not initialized */ |
{ |
TRC_PARMS m; |
trc_default_parms(m); |
212,10 → 246,13 |
trc_suspend=internal_trc_suspend; |
trc_resume=internal_trc_resume; |
/* start the tracer */ |
trc_resume(); |
return 0; |
} |
/* this function simply activates all the registered queues. |
This is usually called into the init() tasks!!! */ |
int TRC_init_phase2(void) |
{ |
int i; |
224,6 → 261,8 |
return 0; |
} |
/* saves the current logevent function and set it as |
the internal_trc_logevent */ |
static int internal_trc_resume(void) |
{ |
SYS_FLAGS f; |
238,6 → 277,8 |
return ret; |
} |
/* restores the saved logevent function (initially, the logevent function is |
a dummy function) */ |
static int internal_trc_suspend(void) |
{ |
SYS_FLAGS f; |
258,8 → 299,10 |
trc_queue_t *queue; |
SYS_FLAGS f; |
/* disables interrupts (this function can be called also into a task */ |
f=kern_fsave(); |
/* check if the event has to be logged */ |
if (eventstable[event].flags&FLAG_NOTRACE) { |
kern_frestore(f); |
return; |
266,10 → 309,11 |
} |
queue=eventstable[event].queue; |
/* gets a free event descriptor, fills it and post it */ |
evt=queue->get(queue->data); |
if (evt!=NULL) { |
evt->event=event; |
evt->time=ll_gettime(TIME_EXACT,NULL); |
evt->time=kern_gettime(NULL); |
memcpy(&evt->x,ptr,sizeof(trc_allevents_t)); |
queue->post(queue->data); |
} |
283,6 → 327,10 |
* |
*/ |
/* these set of functions can be used to trace or not single event and classes. |
They make use of the classtable structure, that is used to discriminate |
the indexes occupied by every class */ |
int classtable[TRC_NUMCLASSES+1]={ |
TRC_F_TRACER, |
TRC_F_SYSTEM, |
353,21 → 401,29 |
{ |
int qf,qc; |
int res; |
/* initialize the trace */ |
res=TRC_init_phase1(NULL); |
if (res) return res; |
/* register two kinds of queues, fixed and circular */ |
res=trc_register_circular_queue(); |
if (res) return res; |
res=trc_register_fixed_queue(); |
if (res) return res; |
/* creates two queues: |
a circular queue for the system events, |
a fixed queue |
*/ |
qc=trc_create_queue(TRC_CIRCULAR_QUEUE,NULL); |
qf=trc_create_queue(TRC_FIXED_QUEUE,NULL); |
if (qc==-1||qf==-1) return -97; |
/* We want to trace all the system events */ |
res=trc_trace_class(TRC_CLASS_SYSTEM); |
if (res) return res; |
/* All the system events must be traced into the circular queue */ |
res=trc_assign_class_to_queue(TRC_CLASS_SYSTEM,qc); |
if (res) return res; |
/shark/tags/rel_0_3/kernel/modules/posix.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: posix.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: posix.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the scheduling module compatible with POSIX |
63,10 → 63,10 |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
/*+ Status used in the level +*/ |
#define POSIX_READY MODULE_STATUS_BASE |
#define POSIX_DELAY MODULE_STATUS_BASE+1 |
/*+ the level redefinition for the Round Robin level +*/ |
typedef struct { |
73,8 → 73,10 |
level_des l; /*+ the standard level descriptor +*/ |
int nact[MAX_PROC]; /*+ number of pending activations +*/ |
int priority[MAX_PROC]; /*+ priority of each task +*/ |
QQUEUE *ready; /*+ the ready queue array +*/ |
IQUEUE *ready; /*+ the ready queue array +*/ |
int slice; /*+ the level's time slice +*/ |
87,73 → 89,11 |
} POSIX_level_des; |
static char *POSIX_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case POSIX_READY: return "POSIX_Ready"; |
case POSIX_DELAY: return "POSIX_Delay"; |
default : return "POSIX_Unknown"; |
} |
} |
/*+ this function is called when a task finish his delay +*/ |
static void POSIX_timer_delay(void *par) |
{ |
PID p = (PID) par; |
POSIX_level_des *lev; |
lev = (POSIX_level_des *)level_table[proc_table[p].task_level]; |
proc_table[p].status = POSIX_READY; |
qq_insertlast(p,&lev->ready[proc_table[p].priority]); |
proc_table[p].delay_timer = NIL; /* Paranoia */ |
// kern_printf(" DELAY TIMER %d ", p); |
event_need_reschedule(); |
} |
static int POSIX_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l)) |
return 0; |
else |
return -1; |
} |
static int POSIX_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
return -1; |
} |
static void POSIX_level_status(LEVEL l) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
PID p; |
kern_printf("Slice: %d \n", lev->slice); |
for (p=0; p<MAX_PROC; p++) |
if (proc_table[p].task_level == l && proc_table[p].status != POSIX_READY |
&& proc_table[p].status != FREE ) |
kern_printf("Pid: %d\t Name: %20s Prio: %3ld Status: %s\n", |
p,proc_table[p].name, |
proc_table[p].priority, |
POSIX_status_to_a(proc_table[p].status)); |
} |
/* This is not efficient but very fair :-) |
The need of all this stuff is because if a task execute a long time |
due to (shadow!) priority inheritance, then the task shall go to the |
tail of the queue many times... */ |
static PID POSIX_level_scheduler(LEVEL l) |
static PID POSIX_public_scheduler(LEVEL l) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
164,7 → 104,7 |
prio = lev->maxpriority; |
for (;;) { |
p = qq_queryfirst(&lev->ready[prio]); |
p = iq_query_first(&lev->ready[prio]); |
if (p == NIL) { |
if (prio) { |
prio--; |
177,8 → 117,8 |
if ((proc_table[p].control & CONTROL_CAP) && |
(proc_table[p].avail_time <= 0)) { |
proc_table[p].avail_time += proc_table[p].wcet; |
qq_extract(p,&lev->ready[prio]); |
qq_insertlast(p,&lev->ready[prio]); |
iq_extract(p,&lev->ready[prio]); |
iq_insertlast(p,&lev->ready[prio]); |
} |
else |
return p; |
185,19 → 125,15 |
} |
} |
static int POSIX_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int POSIX_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
/* the POSIX level always guarantee... the function is defined because |
there can be an aperiodic server at a level with less priority than |
the POSIX that need guarantee (e.g., a TBS server) */ |
return 1; |
} |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
NRT_TASK_MODEL *nrt; |
if (m->pclass != NRT_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
static int POSIX_task_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
NRT_TASK_MODEL *nrt = (NRT_TASK_MODEL *)m; |
nrt = (NRT_TASK_MODEL *)m; |
/* the task state is set at SLEEP by the general task_create */ |
208,7 → 144,7 |
proc_table[exec_shadow].task_level == l) { |
/* We inherit the scheduling properties if the scheduling level |
*is* the same */ |
proc_table[p].priority = proc_table[exec_shadow].priority; |
lev->priority[p] = lev->priority[exec_shadow]; |
proc_table[p].avail_time = proc_table[exec_shadow].avail_time; |
proc_table[p].wcet = proc_table[exec_shadow].wcet; |
219,7 → 155,7 |
lev->nact[p] = (lev->nact[exec_shadow] == -1) ? -1 : 0; |
} |
else { |
proc_table[p].priority = nrt->weight; |
lev->priority[p] = nrt->weight; |
if (nrt->slice) { |
proc_table[p].avail_time = nrt->slice; |
242,54 → 178,23 |
return 0; /* OK */ |
} |
static void POSIX_task_detach(LEVEL l, PID p) |
static void POSIX_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the POSIX level doesn't introduce any new field in the TASK_MODEL |
so, all detach stuffs are done by the task_create |
The task state is set at FREE by the general task_create */ |
} |
static int POSIX_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void POSIX_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
/* the task state is set EXE by the scheduler() |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
qq_extract(p, &lev->ready[proc_table[p].priority]); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds],&schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
iq_extract(p, &lev->ready[lev->priority[p]]); |
} |
static void POSIX_task_epilogue(LEVEL l, PID p) |
static void POSIX_public_epilogue(LEVEL l, PID p) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
if (lev->yielding) { |
lev->yielding = 0; |
qq_insertlast(p,&lev->ready[proc_table[p].priority]); |
iq_insertlast(p,&lev->ready[lev->priority[p]]); |
} |
/* check if the slice is finished and insert the task in the coPOSIXect |
qqueue position */ |
296,15 → 201,15 |
else if (proc_table[p].control & CONTROL_CAP && |
proc_table[p].avail_time <= 0) { |
proc_table[p].avail_time += proc_table[p].wcet; |
qq_insertlast(p,&lev->ready[proc_table[p].priority]); |
iq_insertlast(p,&lev->ready[lev->priority[p]]); |
} |
else |
qq_insertfirst(p,&lev->ready[proc_table[p].priority]); |
iq_insertfirst(p,&lev->ready[lev->priority[p]]); |
proc_table[p].status = POSIX_READY; |
} |
static void POSIX_task_activate(LEVEL l, PID p) |
static void POSIX_public_activate(LEVEL l, PID p) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
316,26 → 221,24 |
return; |
} |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
/* Insert task in the correct position */ |
proc_table[p].status = POSIX_READY; |
qq_insertlast(p,&lev->ready[proc_table[p].priority]); |
iq_insertlast(p,&lev->ready[lev->priority[p]]); |
} |
static void POSIX_task_insert(LEVEL l, PID p) |
static void POSIX_public_unblock(LEVEL l, PID p) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
/* Similar to POSIX_task_activate, but we don't check in what state |
the task is and we don't set the request_time */ |
the task is */ |
/* Insert task in the coPOSIXect position */ |
proc_table[p].status = POSIX_READY; |
qq_insertlast(p,&lev->ready[proc_table[p].priority]); |
iq_insertlast(p,&lev->ready[lev->priority[p]]); |
} |
static void POSIX_task_extract(LEVEL l, PID p) |
static void POSIX_public_block(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extract it from the ready queue at the dispatch time. |
347,22 → 250,26 |
*/ |
} |
static void POSIX_task_endcycle(LEVEL l, PID p) |
static int POSIX_public_message(LEVEL l, PID p, void *m) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
if (lev->nact[p] > 0) { |
/* continue!!!! */ |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
lev->nact[p]--; |
qq_insertfirst(p,&lev->ready[proc_table[p].priority]); |
iq_insertfirst(p,&lev->ready[lev->priority[p]]); |
proc_table[p].status = POSIX_READY; |
} |
else |
proc_table[p].status = SLEEP; |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
return 0; |
} |
static void POSIX_task_end(LEVEL l, PID p) |
static void POSIX_public_end(LEVEL l, PID p) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
370,69 → 277,9 |
/* then, we insert the task in the free queue */ |
proc_table[p].status = FREE; |
q_insert(p,&freedesc); |
iq_priority_insert(p,&freedesc); |
} |
static void POSIX_task_sleep(LEVEL l, PID p) |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
lev->nact[p] = 0; |
proc_table[p].status = SLEEP; |
} |
static void POSIX_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
// POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
struct timespec wakeuptime; |
/* equal to POSIX_task_endcycle */ |
proc_table[p].status = POSIX_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT,&wakeuptime); |
ADDUSEC2TIMESPEC(usdelay,&wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
POSIX_timer_delay, |
(void *)p); |
} |
static int POSIX_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void POSIX_guest_detach(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_epilogue(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_activate(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_insert(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_extract(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_end(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void POSIX_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
/* Registration functions */ |
/*+ This init function install the "main" task +*/ |
463,7 → 310,7 |
if (p == NIL) |
printk("\nPanic!!! can't create main task...\n"); |
POSIX_task_activate(lev,p); |
POSIX_public_activate(lev,p); |
} |
471,7 → 318,7 |
TIME slice the slice for the Round Robin queue |
int createmain 1 if the level creates the main task 0 otherwise |
struct multiboot_info *mb used if createmain specified +*/ |
void POSIX_register_level(TIME slice, |
LEVEL POSIX_register_level(TIME slice, |
int createmain, |
struct multiboot_info *mb, |
int prioritylevels) |
483,55 → 330,23 |
printk("POSIX_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(POSIX_level_des)); |
printk(" alloco descrittore %d %d\n",l,(int)sizeof(POSIX_level_des)); |
lev = (POSIX_level_des *)level_table[l]; |
/* alloc the space needed for the POSIX_level_des */ |
lev = (POSIX_level_des *)kern_alloc(sizeof(POSIX_level_des)); |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, POSIX_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = POSIX_LEVEL_CODE; |
lev->l.level_version = POSIX_LEVEL_VERSION; |
lev->l.public_scheduler = POSIX_public_scheduler; |
lev->l.public_create = POSIX_public_create; |
lev->l.public_end = POSIX_public_end; |
lev->l.public_dispatch = POSIX_public_dispatch; |
lev->l.public_epilogue = POSIX_public_epilogue; |
lev->l.public_activate = POSIX_public_activate; |
lev->l.public_unblock = POSIX_public_unblock; |
lev->l.public_block = POSIX_public_block; |
lev->l.public_message = POSIX_public_message; |
lev->l.level_accept_task_model = POSIX_level_accept_task_model; |
lev->l.level_accept_guest_model = POSIX_level_accept_guest_model; |
lev->l.level_status = POSIX_level_status; |
lev->l.level_scheduler = POSIX_level_scheduler; |
lev->l.level_guarantee = POSIX_level_guarantee; |
lev->l.task_create = POSIX_task_create; |
lev->l.task_detach = POSIX_task_detach; |
lev->l.task_eligible = POSIX_task_eligible; |
lev->l.task_dispatch = POSIX_task_dispatch; |
lev->l.task_epilogue = POSIX_task_epilogue; |
lev->l.task_activate = POSIX_task_activate; |
lev->l.task_insert = POSIX_task_insert; |
lev->l.task_extract = POSIX_task_extract; |
lev->l.task_endcycle = POSIX_task_endcycle; |
lev->l.task_end = POSIX_task_end; |
lev->l.task_sleep = POSIX_task_sleep; |
lev->l.task_delay = POSIX_task_delay; |
lev->l.guest_create = POSIX_guest_create; |
lev->l.guest_detach = POSIX_guest_detach; |
lev->l.guest_dispatch = POSIX_guest_dispatch; |
lev->l.guest_epilogue = POSIX_guest_epilogue; |
lev->l.guest_activate = POSIX_guest_activate; |
lev->l.guest_insert = POSIX_guest_insert; |
lev->l.guest_extract = POSIX_guest_extract; |
lev->l.guest_endcycle = POSIX_guest_endcycle; |
lev->l.guest_end = POSIX_guest_end; |
lev->l.guest_sleep = POSIX_guest_sleep; |
lev->l.guest_delay = POSIX_guest_delay; |
/* fill the POSIX descriptor part */ |
for (i = 0; i < MAX_PROC; i++) |
lev->nact[i] = -1; |
538,10 → 353,10 |
lev->maxpriority = prioritylevels -1; |
lev->ready = (QQUEUE *)kern_alloc(sizeof(QQUEUE) * prioritylevels); |
lev->ready = (IQUEUE *)kern_alloc(sizeof(IQUEUE) * prioritylevels); |
for (x = 0; x < prioritylevels; x++) |
qq_init(&lev->ready[x]); |
iq_init(&lev->ready[x], &freedesc, 0); |
if (slice < POSIX_MINIMUM_SLICE) slice = POSIX_MINIMUM_SLICE; |
if (slice > POSIX_MAXIMUM_SLICE) slice = POSIX_MAXIMUM_SLICE; |
551,6 → 366,8 |
if (createmain) |
sys_atrunlevel(POSIX_call_main,(void *) l, RUNLEVEL_INIT); |
return l; |
} |
/*+ this function forces the running task to go to his queue tail; |
559,13 → 376,6 |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
if (l < 0 || l >= sched_levels) |
return -1; |
if (level_table[l]->level_code != POSIX_LEVEL_CODE || |
level_table[l]->level_version != POSIX_LEVEL_VERSION ) |
return -1; |
if (proc_table[exec_shadow].task_level != l) |
return -1; |
596,13 → 406,6 |
returns ENOSYS or ESRCH if there are problems +*/ |
int POSIX_getschedparam(LEVEL l, PID p, int *policy, int *priority) |
{ |
if (l < 0 || l >= sched_levels) |
return ENOSYS; |
if (level_table[l]->level_code != POSIX_LEVEL_CODE || |
level_table[l]->level_version != POSIX_LEVEL_VERSION ) |
return ENOSYS; |
if (p<0 || p>= MAX_PROC || proc_table[p].status == FREE) |
return ESRCH; |
614,7 → 417,7 |
else |
*policy = NRT_FIFO_POLICY; |
*priority = proc_table[p].priority; |
*priority = ((POSIX_level_des *)(level_table[l]))->priority[p]; |
return 0; |
} |
624,13 → 427,6 |
{ |
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]); |
if (l < 0 || l >= sched_levels) |
return ENOSYS; |
if (level_table[l]->level_code != POSIX_LEVEL_CODE || |
level_table[l]->level_version != POSIX_LEVEL_VERSION ) |
return ENOSYS; |
if (p<0 || p>= MAX_PROC || proc_table[p].status == FREE) |
return ESRCH; |
644,14 → 440,14 |
else |
return EINVAL; |
if (proc_table[p].priority != priority) { |
if (lev->priority[p] != priority) { |
if (proc_table[p].status == POSIX_READY) { |
qq_extract(p,&lev->ready[proc_table[p].priority]); |
proc_table[p].priority = priority; |
qq_insertlast(p,&lev->ready[priority]); |
iq_extract(p,&lev->ready[lev->priority[p]]); |
lev->priority[p] = priority; |
iq_insertlast(p,&lev->ready[priority]); |
} |
else |
proc_table[p].priority = priority; |
lev->priority[p] = priority; |
} |
return 0; |
/shark/tags/rel_0_3/kernel/modules/pc.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: pc.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: pc.c,v 1.2 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
Priority Ceiling protocol. see pc.h for more details... |
57,7 → 57,6 |
#include <ll/string.h> |
#include <ll/stdio.h> |
#include <kernel/const.h> |
#include <modules/codes.h> |
#include <sys/types.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
153,7 → 152,7 |
} |
#if 0 |
/*+ print resource protocol statistics...+*/ |
static void PC_resource_status(RLEVEL r) |
{ |
172,23 → 171,24 |
// in the future: print the status of the blocked semaphores! |
} |
#endif |
static int PC_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
PC_mutex_resource_des *m = (PC_mutex_resource_des *)(resource_table[l]); |
PC_RES_MODEL *pc; |
static int PC_level_accept_resource_model(RLEVEL l, RES_MODEL *r) |
{ |
if (r->rclass == PC_RCLASS || r->rclass == (PC_RCLASS | l) ) |
return 0; |
else |
if (r->rclass != PC_RCLASS) |
return -1; |
} |
if (r->level && r->level !=l) |
return -1; |
static void PC_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
PC_mutex_resource_des *m = (PC_mutex_resource_des *)(resource_table[l]); |
PC_RES_MODEL *pc = (PC_RES_MODEL *)r; |
pc = (PC_RES_MODEL *)r; |
m->priority[p] = pc->priority; |
m->nlocked[p] = 0; |
return 0; |
} |
static void PC_res_detach(RLEVEL l, PID p) |
203,18 → 203,13 |
m->priority[p] = MAX_DWORD; |
} |
static int PC_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a) |
{ |
if (a->mclass == PC_MCLASS || a->mclass == (PC_MCLASS | l) ) |
return 0; |
else |
return -1; |
} |
static int PC_init(RLEVEL l, mutex_t *m, const mutexattr_t *a) |
{ |
PC_mutex_t *p; |
if (a->mclass != PC_MCLASS) |
return -1; |
p = (PC_mutex_t *) kern_alloc(sizeof(PC_mutex_t)); |
/* control if there is enough memory; no control on init on a |
403,7 → 398,7 |
return 0; |
} |
void PC_register_module(void) |
RLEVEL PC_register_module(void) |
{ |
RLEVEL l; /* the level that we register */ |
PC_mutex_resource_des *m; /* for readableness only */ |
421,20 → 416,11 |
resource_table[l] = (resource_des *)m; |
/* fill the resource_des descriptor */ |
strncpy(m->m.r.res_name, PC_MODULENAME, MAX_MODULENAME); |
m->m.r.res_code = PC_MODULE_CODE; |
m->m.r.res_version = PC_MODULE_VERSION; |
m->m.r.rtype = MUTEX_RTYPE; |
m->m.r.resource_status = PC_resource_status; |
m->m.r.level_accept_resource_model = PC_level_accept_resource_model; |
m->m.r.res_register = PC_res_register; |
m->m.r.res_detach = PC_res_detach; |
/* fill the mutex_resource_des descriptor */ |
m->m.level_accept_mutexattr = PC_level_accept_mutexattr; |
m->m.init = PC_init; |
m->m.destroy = PC_destroy; |
m->m.lock = PC_lock; |
447,6 → 433,8 |
m->mlist = NULL; |
return l; |
} |
/*+ This function gets the ceiling of a PC mutex, and it have to be called |
461,11 → 449,6 |
r = resource_table[mutex->mutexlevel]; |
if (r->rtype != MUTEX_RTYPE || |
r->res_code != PC_MODULE_CODE || |
r->res_version != PC_MODULE_VERSION) |
return -1; |
if (ceiling) |
*ceiling = ((PC_mutex_t *)mutex->opt)->ceiling; |
else |
486,11 → 469,6 |
r = resource_table[mutex->mutexlevel]; |
if (r->rtype != MUTEX_RTYPE || |
r->res_code != PC_MODULE_CODE || |
r->res_version != PC_MODULE_VERSION) |
return -1; |
if (old_ceiling) |
*old_ceiling = ((PC_mutex_t *)mutex->opt)->ceiling; |
/shark/tags/rel_0_3/kernel/modules/bd_edf.c |
---|
38,11 → 38,11 |
*/ |
/* |
* CVS : $Id: bd_edf.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
* CVS : $Id: bd_edf.c,v 1.2 2003-01-07 17:07:50 pj Exp $ |
* |
* File: $File$ |
* Revision: $Revision: 1.1.1.1 $ |
* Last update: $Date: 2002-03-29 14:12:52 $ |
* Revision: $Revision: 1.2 $ |
* Last update: $Date: 2003-01-07 17:07:50 $ |
*/ |
#include <modules/bd_edf.h> |
51,7 → 51,6 |
#include <ll/string.h> |
#include <ll/stdio.h> |
#include <kernel/const.h> |
#include <modules/codes.h> |
#include <sys/types.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
74,12 → 73,21 |
return -1; |
} |
static void res_register(RLEVEL l, PID p, RES_MODEL *r) |
static int res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
bd_edf_resource_des *m=(bd_edf_resource_des*)(resource_table[l]); |
BDEDF_RES_MODEL *rm=(BDEDF_RES_MODEL*)r; |
BDEDF_RES_MODEL *rm; |
if (r->rclass!=BDEDF_RCLASS) |
return -1; |
if (r->level && r->level !=l) |
return -1; |
rm=(BDEDF_RES_MODEL*)r; |
assertk(mylevel==l); |
m->dl[p]=rm->dl; |
return 0; |
} |
static void res_detach(RLEVEL l, PID p) |
89,10 → 97,7 |
m->dl[p]=0; |
} |
static void res_resource_status(void) |
{} |
void BD_EDF_register_module(void) |
RLEVEL BD_EDF_register_module(void) |
{ |
RLEVEL l; |
bd_edf_resource_des *m; |
108,12 → 113,7 |
resource_table[l]=(resource_des*)m; |
/* fill the resource_des descriptor */ |
strcpy(m->rd.res_name,BDEDF_MODULENAME); |
m->rd.res_code=BDEDF_MODULE_CODE; |
m->rd.res_version=BDEDF_MODULE_VERSION; |
m->rd.rtype=DEFAULT_RTYPE; |
m->rd.resource_status=res_resource_status; |
m->rd.level_accept_resource_model=res_level_accept_resource_model; |
m->rd.res_register=res_register; |
m->rd.res_detach=res_detach; |
121,6 → 121,8 |
assertk(mylevel==-1); |
mylevel=l; |
return l; |
} |
TIME bd_edf_getdl(void) |
/shark/tags/rel_0_3/kernel/modules/srp.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: srp.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: srp.c,v 1.3 2003-01-07 17:07:51 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:51 $ |
------------ |
Stack Resource Policy. see srp.h for general details... |
141,7 → 141,6 |
#include <ll/string.h> |
#include <ll/stdio.h> |
#include <kernel/const.h> |
#include <modules/codes.h> |
#include <sys/types.h> |
#include <kernel/descr.h> |
#include <kernel/var.h> |
385,27 → 384,14 |
} |
/*+ print resource protocol statistics...+*/ |
static void SRP_resource_status(RLEVEL r) |
static int SRP_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
kern_printf("SRP status not implemented yet"); |
} |
SRP_mutex_resource_des *m = (SRP_mutex_resource_des *)(resource_table[l]); |
static int SRP_level_accept_resource_model(RLEVEL l, RES_MODEL *r) |
{ |
if (r->rclass == SRP_RCLASS || r->rclass == (SRP_RCLASS | l) || |
r->rclass == SRP2_RCLASS || r->rclass == (SRP2_RCLASS | l)) |
return 0; |
else |
if (r->level && r->level !=l) |
return -1; |
} |
static void SRP_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
SRP_mutex_resource_des *m = (SRP_mutex_resource_des *)(resource_table[l]); |
if (r->rclass == SRP_RCLASS || r->rclass == (SRP_RCLASS | l)) { |
if (r->rclass == SRP_RCLASS) { |
/* SRP_RES_MODEL resource model */ |
// kern_printf("!%d %d",((SRP_RES_MODEL *)r)->preempt,p); |
429,14 → 415,15 |
} |
m->nlocked[p] = 0; |
return 0; |
} |
else { |
else if (r->rclass == SRP2_RCLASS) { |
/* a mutex passed via SRP_useres() */ |
SRP_mutex_t *mut = (SRP_mutex_t *)r; |
if (mut->use[p]) |
/* the mutex is already registered, do nothing! */ |
return; |
return -1; |
/* register the mutex for the task */ |
mut->use[p] = 1; |
449,7 → 436,10 |
mut->ceiling = m->proc_preempt[p].preempt; |
} |
return 0; |
} |
else |
return -1; |
} |
static void SRP_res_detach(RLEVEL l, PID p) |
488,14 → 478,6 |
SRP_extract_tasklist(m, p); |
} |
static int SRP_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a) |
{ |
if (a->mclass == SRP_MCLASS || a->mclass == (SRP_MCLASS | l) ) |
return 0; |
else |
return -1; |
} |
static int SRP_init(RLEVEL l, mutex_t *m, const mutexattr_t *a) |
{ |
SRP_mutex_resource_des *lev = (SRP_mutex_resource_des *)(resource_table[l]); |
502,6 → 484,9 |
SRP_mutex_t *p; |
PID x; |
if (a->mclass != SRP_MCLASS) |
return -1; |
p = (SRP_mutex_t *) kern_alloc(sizeof(SRP_mutex_t)); |
/* control if there is enough memory; no control on init on a |
595,7 → 580,7 |
// lev, mut->owner, |
// mut->use[exec_shadow], |
// lev->proc_preempt[exec_shadow].preempt,exec_shadow); |
kern_raise(XSRP_UNVALID_LOCK, exec_shadow); |
kern_raise(XSRP_INVALID_LOCK, exec_shadow); |
kern_sti(); |
return (EINVAL); |
} |
719,7 → 704,7 |
/* activate the task if it was activated while in lobby list! */ |
if (task_unblock_activation(x)) { |
LEVEL sl = proc_table[x].task_level; |
level_table[sl]->task_activate(sl,x); |
level_table[sl]->public_activate(sl,x); |
// kern_printf("activate it!!!"); |
} |
} |
736,7 → 721,7 |
return 0; |
} |
void SRP_register_module(void) |
RLEVEL SRP_register_module(void) |
{ |
RLEVEL l; /* the level that we register */ |
SRP_mutex_resource_des *m; /* for readableness only */ |
754,20 → 739,11 |
resource_table[l] = (resource_des *)m; |
/* fill the resource_des descriptor */ |
strncpy(m->m.r.res_name, SRP_MODULENAME, MAX_MODULENAME); |
m->m.r.res_code = SRP_MODULE_CODE; |
m->m.r.res_version = SRP_MODULE_VERSION; |
m->m.r.rtype = MUTEX_RTYPE; |
m->m.r.resource_status = SRP_resource_status; |
m->m.r.level_accept_resource_model = SRP_level_accept_resource_model; |
m->m.r.res_register = SRP_res_register; |
m->m.r.res_detach = SRP_res_detach; |
/* fill the mutex_resource_des descriptor */ |
m->m.level_accept_mutexattr = SRP_level_accept_mutexattr; |
m->m.init = SRP_init; |
m->m.destroy = SRP_destroy; |
m->m.lock = SRP_lock; |
789,5 → 765,7 |
m->srpstack = NULL; |
m->srprecalc = NULL; |
m->srplist = NULL; |
return l; |
} |
/shark/tags/rel_0_3/kernel/modules/rr2.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: rr2.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: rr2.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the scheduling module RR2 (Round Robin) version 2 |
60,10 → 60,10 |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
/*+ Status used in the level +*/ |
#define RR2_READY MODULE_STATUS_BASE |
#define RR2_DELAY MODULE_STATUS_BASE+1 |
/*+ the level redefinition for the Round Robin level +*/ |
typedef struct { |
71,7 → 71,7 |
int nact[MAX_PROC]; /*+ number of pending activations +*/ |
QQUEUE ready; /*+ the ready queue +*/ |
IQUEUE ready; /*+ the ready queue +*/ |
int slice; /*+ the level's time slice +*/ |
80,77 → 80,11 |
} RR2_level_des; |
static char *RR2_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case RR2_READY: return "RR2_Ready"; |
case RR2_DELAY: return "RR2_Delay"; |
default : return "RR2_Unknown"; |
} |
} |
/*+ this function is called when a task finish his delay +*/ |
static void RR2_timer_delay(void *par) |
{ |
PID p = (PID) par; |
RR2_level_des *lev; |
lev = (RR2_level_des *)level_table[proc_table[p].task_level]; |
proc_table[p].status = RR2_READY; |
qq_insertlast(p,&lev->ready); |
proc_table[p].delay_timer = NIL; /* Paranoia */ |
// kern_printf(" DELAY TIMER %d ", p); |
event_need_reschedule(); |
} |
static int RR2_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l)) |
return 0; |
else |
return -1; |
} |
static int RR2_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
return -1; |
} |
static void RR2_level_status(LEVEL l) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
PID p = qq_queryfirst(&lev->ready); |
kern_printf("Slice: %d \n", lev->slice); |
while (p != NIL) { |
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name, |
RR2_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
} |
for (p=0; p<MAX_PROC; p++) |
if (proc_table[p].task_level == l && proc_table[p].status != RR2_READY |
&& proc_table[p].status != FREE ) |
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name, |
RR2_status_to_a(proc_table[p].status)); |
} |
/* This is not efficient but very fair :-) |
The need of all this stuff is because if a task execute a long time |
due to (shadow!) priority inheritance, then the task shall go to the |
tail of the queue many times... */ |
static PID RR2_level_scheduler(LEVEL l) |
static PID RR2_public_scheduler(LEVEL l) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
157,14 → 91,14 |
PID p; |
for (;;) { |
p = qq_queryfirst(&lev->ready); |
p = iq_query_first(&lev->ready); |
if (p == -1) |
return p; |
if (proc_table[p].avail_time <= 0) { |
proc_table[p].avail_time += proc_table[p].wcet; |
qq_extract(p,&lev->ready); |
qq_insertlast(p,&lev->ready); |
iq_extract(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
else |
return p; |
171,20 → 105,15 |
} |
} |
static int RR2_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int RR2_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
/* the RR2 level always guarantee... the function is defined because |
there can be an aperiodic server at a level with less priority than |
the RR2 that need guarantee (e.g., a TBS server) */ |
return 1; |
} |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
NRT_TASK_MODEL *nrt; |
if (m->pclass != NRT_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
nrt = (NRT_TASK_MODEL *)m; |
static int RR2_task_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
NRT_TASK_MODEL *nrt = (NRT_TASK_MODEL *)m; |
/* the task state is set at SLEEP by the general task_create |
the only thing to set remains the capacity stuffs that are set |
to the values passed in the model... */ |
210,48 → 139,17 |
return 0; /* OK */ |
} |
static void RR2_task_detach(LEVEL l, PID p) |
static void RR2_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the RR2 level doesn't introduce any new field in the TASK_MODEL |
so, all detach stuffs are done by the task_create |
The task state is set at FREE by the general task_create */ |
} |
static int RR2_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void RR2_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
/* the task state is set EXE by the scheduler() |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
qq_extract(p, &lev->ready); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds],&schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
iq_extract(p, &lev->ready); |
} |
static void RR2_task_epilogue(LEVEL l, PID p) |
static void RR2_public_epilogue(LEVEL l, PID p) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
259,16 → 157,16 |
qqueue position */ |
if (proc_table[p].avail_time <= 0) { |
proc_table[p].avail_time += proc_table[p].wcet; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
else |
/* cuRR2 is >0, so the running task have to run for another cuRR2 usec */ |
qq_insertfirst(p,&lev->ready); |
iq_insertfirst(p,&lev->ready); |
proc_table[p].status = RR2_READY; |
} |
static void RR2_task_activate(LEVEL l, PID p) |
static void RR2_public_activate(LEVEL l, PID p) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
280,26 → 178,24 |
return; |
} |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
/* Insert task in the coRR2ect position */ |
proc_table[p].status = RR2_READY; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
static void RR2_task_insert(LEVEL l, PID p) |
static void RR2_public_unblock(LEVEL l, PID p) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
/* Similar to RR2_task_activate, but we don't check in what state |
the task is and we don't set the request_time */ |
/* Similar to RR2_task_activate, |
but we don't check in what state the task is */ |
/* Insert task in the coRR2ect position */ |
proc_table[p].status = RR2_READY; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
static void RR2_task_extract(LEVEL l, PID p) |
static void RR2_public_block(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extract it from the ready queue at the dispatch time. |
311,22 → 207,26 |
*/ |
} |
static void RR2_task_endcycle(LEVEL l, PID p) |
static int RR2_public_message(LEVEL l, PID p, void *m) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
if (lev->nact[p] > 0) { |
/* continue!!!! */ |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
lev->nact[p]--; |
qq_insertfirst(p,&lev->ready); |
iq_insertfirst(p,&lev->ready); |
proc_table[p].status = RR2_READY; |
} |
else |
proc_table[p].status = SLEEP; |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
return 0; |
} |
static void RR2_task_end(LEVEL l, PID p) |
static void RR2_public_end(LEVEL l, PID p) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
334,69 → 234,9 |
/* then, we insert the task in the free queue */ |
proc_table[p].status = FREE; |
q_insert(p,&freedesc); |
iq_insertlast(p,&freedesc); |
} |
static void RR2_task_sleep(LEVEL l, PID p) |
{ |
RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
if (lev->nact[p] >= 0) lev->nact[p] = 0; |
proc_table[p].status = SLEEP; |
} |
static void RR2_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
// RR2_level_des *lev = (RR2_level_des *)(level_table[l]); |
struct timespec wakeuptime; |
/* equal to RR2_task_endcycle */ |
proc_table[p].status = RR2_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT,&wakeuptime); |
ADDUSEC2TIMESPEC(usdelay,&wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
RR2_timer_delay, |
(void *)p); |
} |
static int RR2_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void RR2_guest_detach(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_epilogue(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_activate(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_insert(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_extract(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_end(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RR2_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
/* Registration functions */ |
/*+ This init function install the "main" task +*/ |
424,7 → 264,7 |
if (p == NIL) |
printk("\nPanic!!! can't create main task...\n"); |
RR2_task_activate(lev,p); |
RR2_public_activate(lev,p); |
} |
432,11 → 272,11 |
TIME slice the slice for the Round Robin queue |
int createmain 1 if the level creates the main task 0 otherwise |
struct multiboot_info *mb used if createmain specified +*/ |
void RR2_register_level(TIME slice, |
LEVEL RR2_register_level(TIME slice, |
int createmain, |
struct multiboot_info *mb) |
{ |
LEVEL l; /* the level that we register */ |
LEVEL l; /* the level that we register */ |
RR2_level_des *lev; /* for readableness only */ |
PID i; |
443,57 → 283,28 |
printk("RR2_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(RR2_level_des)); |
/* alloc the space needed for the RR2_level_des */ |
lev = (RR2_level_des *)kern_alloc(sizeof(RR2_level_des)); |
lev = (RR2_level_des *)level_table[l]; |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, RR2_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = RR2_LEVEL_CODE; |
lev->l.level_version = RR2_LEVEL_VERSION; |
lev->l.public_scheduler = RR2_public_scheduler; |
lev->l.public_create = RR2_public_create; |
lev->l.public_end = RR2_public_end; |
lev->l.public_dispatch = RR2_public_dispatch; |
lev->l.public_epilogue = RR2_public_epilogue; |
lev->l.public_activate = RR2_public_activate; |
lev->l.public_unblock = RR2_public_unblock; |
lev->l.public_block = RR2_public_block; |
lev->l.public_message = RR2_public_message; |
lev->l.level_accept_task_model = RR2_level_accept_task_model; |
lev->l.level_accept_guest_model = RR2_level_accept_guest_model; |
lev->l.level_status = RR2_level_status; |
lev->l.level_scheduler = RR2_level_scheduler; |
lev->l.level_guarantee = RR2_level_guarantee; |
lev->l.task_create = RR2_task_create; |
lev->l.task_detach = RR2_task_detach; |
lev->l.task_eligible = RR2_task_eligible; |
lev->l.task_dispatch = RR2_task_dispatch; |
lev->l.task_epilogue = RR2_task_epilogue; |
lev->l.task_activate = RR2_task_activate; |
lev->l.task_insert = RR2_task_insert; |
lev->l.task_extract = RR2_task_extract; |
lev->l.task_endcycle = RR2_task_endcycle; |
lev->l.task_end = RR2_task_end; |
lev->l.task_sleep = RR2_task_sleep; |
lev->l.task_delay = RR2_task_delay; |
lev->l.guest_create = RR2_guest_create; |
lev->l.guest_detach = RR2_guest_detach; |
lev->l.guest_dispatch = RR2_guest_dispatch; |
lev->l.guest_epilogue = RR2_guest_epilogue; |
lev->l.guest_activate = RR2_guest_activate; |
lev->l.guest_insert = RR2_guest_insert; |
lev->l.guest_extract = RR2_guest_extract; |
lev->l.guest_endcycle = RR2_guest_endcycle; |
lev->l.guest_end = RR2_guest_end; |
lev->l.guest_sleep = RR2_guest_sleep; |
lev->l.guest_delay = RR2_guest_delay; |
/* fill the RR2 descriptor part */ |
for (i = 0; i < MAX_PROC; i++) |
lev->nact[i] = -1; |
qq_init(&lev->ready); |
iq_init(&lev->ready, &freedesc, 0); |
if (slice < RR2_MINIMUM_SLICE) slice = RR2_MINIMUM_SLICE; |
if (slice > RR2_MAXIMUM_SLICE) slice = RR2_MAXIMUM_SLICE; |
503,6 → 314,8 |
if (createmain) |
sys_atrunlevel(RR2_call_main,(void *) l, RUNLEVEL_INIT); |
return l; |
} |
/shark/tags/rel_0_3/kernel/modules/ds.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: ds.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: ds.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the aperiodic server DS (Deferrable Server) |
64,6 → 64,7 |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
/*+ Status used in the level +*/ |
#define DS_WAIT APER_STATUS_BASE /*+ waiting the service +*/ |
83,7 → 84,7 |
int Cs; /*+ server capacity +*/ |
int availCs; /*+ server avail time +*/ |
QQUEUE wait; /*+ the wait queue of the DS +*/ |
IQUEUE wait; /*+ the wait queue of the DS +*/ |
PID activated; /*+ the task inserted in another queue +*/ |
int flags; /*+ the init flags... +*/ |
106,8 → 107,7 |
m = lev->scheduling_level; |
job_task_default_model(j,lev->lastdline); |
job_task_def_period(j,lev->period); |
level_table[m]->guest_create(m,p,(TASK_MODEL *)&j); |
level_table[m]->guest_activate(m,p); |
level_table[m]->private_insert(m,p,(TASK_MODEL *)&j); |
// kern_printf("(%d %d)",lev->lastdline.tv_sec,lev->lastdline.tv_nsec); |
} |
128,8 → 128,8 |
was not any other task to be put in the ready queue |
... we are now activating the next task */ |
if (lev->availCs > 0 && lev->activated == NIL) { |
if (qq_queryfirst(&lev->wait) != NIL) { |
lev->activated = qq_getfirst(&lev->wait); |
if (iq_query_first(&lev->wait) != NIL) { |
lev->activated = iq_getfirst(&lev->wait); |
DS_activation(lev); |
event_need_reschedule(); |
} |
139,80 → 139,8 |
// kern_printf("!"); |
} |
static char *DS_status_to_a(WORD status) |
static PID DS_public_schedulerbackground(LEVEL l) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case DS_WAIT : return "DS_Wait"; |
default : return "DS_Unknown"; |
} |
} |
static int DS_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l) ) { |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; |
if (s->periodicity == APERIODIC) |
return 0; |
} |
return -1; |
} |
static int DS_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
return -1; |
} |
static char *onoff(int i) |
{ |
if (i) |
return "On "; |
else |
return "Off"; |
} |
static void DS_level_status(LEVEL l) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
PID p = qq_queryfirst(&lev->wait); |
kern_printf("On-line guarantee : %s\n", |
onoff(lev->flags & DS_ENABLE_GUARANTEE_EDF || |
lev->flags & DS_ENABLE_GUARANTEE_RM )); |
kern_printf("Used Bandwidth : %u/%u\n", |
lev->U, MAX_BANDWIDTH); |
if (lev->activated != -1) |
kern_printf("Activated: Pid: %2d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n", |
lev->activated, |
proc_table[lev->activated].name, |
proc_table[lev->activated].timespec_priority.tv_sec, |
proc_table[lev->activated].timespec_priority.tv_nsec, |
lev->nact[lev->activated], |
DS_status_to_a(proc_table[lev->activated].status)); |
while (p != NIL) { |
kern_printf("Pid: %2d Name: %10s Stat: %s\n", |
p, |
proc_table[p].name, |
DS_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
} |
} |
static PID DS_level_scheduler(LEVEL l) |
{ |
/* the DS don't schedule anything... |
it's an EDF level or similar that do it! */ |
return NIL; |
} |
static PID DS_level_schedulerbackground(LEVEL l) |
{ |
/* the DS catch the background time to exec aperiodic activities */ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
221,11 → 149,11 |
if (lev->flags & DS_BACKGROUND_BLOCK) |
return NIL; |
else |
return qq_queryfirst(&lev->wait); |
return iq_query_first(&lev->wait); |
} |
/* The on-line guarantee is enabled only if the appropriate flag is set... */ |
static int DS_level_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth) |
static int DS_public_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
237,7 → 165,7 |
return 0; |
} |
static int DS_level_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth) |
static int DS_public_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
249,14 → 177,19 |
return 0; |
} |
static int DS_task_create(LEVEL l, PID p, TASK_MODEL *m) |
static int DS_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
/* if the DS_task_create is called, then the pclass must be a |
valid pclass. */ |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; |
SOFT_TASK_MODEL *s; |
if (m->pclass != SOFT_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
s = (SOFT_TASK_MODEL *)m; |
if (s->periodicity != APERIODIC) return -1; |
s = (SOFT_TASK_MODEL *)m; |
if (s->arrivals == SAVE_ARRIVALS) |
lev->nact[p] = 0; |
else |
265,26 → 198,8 |
return 0; /* OK, also if the task cannot be guaranteed... */ |
} |
static void DS_task_detach(LEVEL l, PID p) |
static void DS_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the DS level doesn't introduce any dinamic allocated new field. */ |
} |
static int DS_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void DS_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
struct timespec ty; |
294,13 → 209,13 |
to exe before calling task_dispatch. we have to check |
lev->activated != p instead */ |
if (lev->activated != p) { |
qq_extract(p, &lev->wait); |
iq_extract(p, &lev->wait); |
//kern_printf("#%d#",p); |
} |
else { |
//if (nostop) kern_printf("(gd status=%d)",proc_table[p].status); |
level_table[ lev->scheduling_level ]-> |
guest_dispatch(lev->scheduling_level,p,nostop); |
private_dispatch(lev->scheduling_level,p,nostop); |
} |
/* set the capacity timer */ |
311,19 → 226,9 |
} |
// kern_printf("(disp %d %d)",ty.tv_sec, ty.tv_nsec); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
} |
static void DS_task_epilogue(LEVEL l, PID p) |
static void DS_public_epilogue(LEVEL l, PID p) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
struct timespec ty; |
355,8 → 260,8 |
task point the shadow to it!!!*/ |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
qq_insertfirst(p, &lev->wait); |
private_extract(lev->scheduling_level,p); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = DS_WAIT; |
lev->activated = NIL; |
} |
365,14 → 270,14 |
wait queue by calling the guest_epilogue... */ |
if (lev->activated == p) {//kern_printf("Û1"); |
level_table[ lev->scheduling_level ]-> |
guest_epilogue(lev->scheduling_level,p); |
private_epilogue(lev->scheduling_level,p); |
} else { //kern_printf("Û2"); |
qq_insertfirst(p, &lev->wait); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = DS_WAIT; |
} |
} |
static void DS_task_activate(LEVEL l, PID p) |
static void DS_public_activate(LEVEL l, PID p) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
381,7 → 286,6 |
lev->nact[p]++; |
} |
else if (proc_table[p].status == SLEEP) { |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
if (lev->activated == NIL && lev->availCs > 0) { |
lev->activated = p; |
388,7 → 292,7 |
DS_activation(lev); |
} |
else { |
qq_insertlast(p, &lev->wait); |
iq_insertlast(p, &lev->wait); |
proc_table[p].status = DS_WAIT; |
} |
} |
398,7 → 302,7 |
} |
static void DS_task_insert(LEVEL l, PID p) |
static void DS_public_unblock(LEVEL l, PID p) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
408,11 → 312,11 |
/* when we reinsert the task into the system, the server capacity |
is always 0 because nobody executes with the DS before... */ |
qq_insertfirst(p, &lev->wait); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = DS_WAIT; |
} |
static void DS_task_extract(LEVEL l, PID p) |
static void DS_public_block(LEVEL l, PID p) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
423,10 → 327,10 |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
} |
static void DS_task_endcycle(LEVEL l, PID p) |
static int DS_public_message(LEVEL l, PID p, void *m) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
struct timespec ty; |
443,52 → 347,30 |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
else |
qq_extract(p, &lev->wait); |
iq_extract(p, &lev->wait); |
if (lev->nact[p] > 0) |
{ |
lev->nact[p]--; |
qq_insertlast(p, &lev->wait); |
iq_insertlast(p, &lev->wait); |
proc_table[p].status = DS_WAIT; |
} |
else |
proc_table[p].status = SLEEP; |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated != NIL) |
DS_activation(lev); |
} |
static void DS_task_end(LEVEL l, PID p) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
struct timespec ty; |
TIME tx; |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
/* update the server capacity */ |
if (lev->flags & DS_BACKGROUND) |
lev->flags &= ~DS_BACKGROUND; |
else { |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
lev->availCs -= tx; |
} |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
proc_table[p].status = FREE; |
q_insertfirst(p,&freedesc); |
lev->activated = qq_getfirst(&lev->wait); |
if (lev->activated != NIL) |
DS_activation(lev); |
return 0; |
} |
static void DS_task_sleep(LEVEL l, PID p) |
static void DS_public_end(LEVEL l, PID p) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
struct timespec ty; |
503,78 → 385,18 |
lev->availCs -= tx; |
} |
if (lev->nact[p] >= 0) lev->nact[p] = 0; |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
else |
qq_extract(p, &lev->wait); |
private_extract(lev->scheduling_level,p); |
proc_table[p].status = SLEEP; |
proc_table[p].status = FREE; |
iq_insertfirst(p,&freedesc); |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated != NIL) |
DS_activation(lev); |
} |
static void DS_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
struct timespec ty; |
TIME tx; |
/* update the server capacity */ |
if (lev->flags & DS_BACKGROUND) |
lev->flags &= ~DS_BACKGROUND; |
else { |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
lev->availCs -= tx; |
} |
/* I hope no delay when owning a mutex... */ |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_delay(lev->scheduling_level,p,usdelay); |
} |
static int DS_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void DS_guest_detach(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_epilogue(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_activate(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_insert(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_extract(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_end(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void DS_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
/* Registration functions */ |
584,7 → 406,7 |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[(LEVEL)l]); |
ll_gettime(TIME_EXACT,&lev->lastdline); |
kern_gettime(&lev->lastdline); |
ADDUSEC2TIMESPEC(lev->period, &lev->lastdline); |
kern_event_post(&lev->lastdline, DS_deadline_timer, l); |
594,7 → 416,7 |
/*+ Registration function: |
int flags the init flags ... see DS.h +*/ |
void DS_register_level(int flags, LEVEL master, int Cs, int per) |
LEVEL DS_register_level(int flags, LEVEL master, int Cs, int per) |
{ |
LEVEL l; /* the level that we register */ |
DS_level_des *lev; /* for readableness only */ |
603,64 → 425,33 |
printk("DS_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(DS_level_des)); |
printk(" alloco descrittore %d %d\n",l,(int)sizeof(DS_level_des)); |
lev = (DS_level_des *)level_table[l]; |
/* alloc the space needed for the DS_level_des */ |
lev = (DS_level_des *)kern_alloc(sizeof(DS_level_des)); |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, DS_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = DS_LEVEL_CODE; |
lev->l.level_version = DS_LEVEL_VERSION; |
lev->l.level_accept_task_model = DS_level_accept_task_model; |
lev->l.level_accept_guest_model = DS_level_accept_guest_model; |
lev->l.level_status = DS_level_status; |
if (flags & DS_ENABLE_BACKGROUND) |
lev->l.level_scheduler = DS_level_schedulerbackground; |
else |
lev->l.level_scheduler = DS_level_scheduler; |
lev->l.public_scheduler = DS_public_schedulerbackground; |
if (flags & DS_ENABLE_GUARANTEE_EDF) |
lev->l.level_guarantee = DS_level_guaranteeEDF; |
lev->l.public_guarantee = DS_public_guaranteeEDF; |
else if (flags & DS_ENABLE_GUARANTEE_RM) |
lev->l.level_guarantee = DS_level_guaranteeRM; |
lev->l.public_guarantee = DS_public_guaranteeRM; |
else |
lev->l.level_guarantee = NULL; |
lev->l.public_guarantee = NULL; |
lev->l.task_create = DS_task_create; |
lev->l.task_detach = DS_task_detach; |
lev->l.task_eligible = DS_task_eligible; |
lev->l.task_dispatch = DS_task_dispatch; |
lev->l.task_epilogue = DS_task_epilogue; |
lev->l.task_activate = DS_task_activate; |
lev->l.task_insert = DS_task_insert; |
lev->l.task_extract = DS_task_extract; |
lev->l.task_endcycle = DS_task_endcycle; |
lev->l.task_end = DS_task_end; |
lev->l.task_sleep = DS_task_sleep; |
lev->l.task_delay = DS_task_delay; |
lev->l.public_create = DS_public_create; |
lev->l.public_end = DS_public_end; |
lev->l.public_dispatch = DS_public_dispatch; |
lev->l.public_epilogue = DS_public_epilogue; |
lev->l.public_activate = DS_public_activate; |
lev->l.public_unblock = DS_public_unblock; |
lev->l.public_block = DS_public_block; |
lev->l.public_message = DS_public_message; |
lev->l.guest_create = DS_guest_create; |
lev->l.guest_detach = DS_guest_detach; |
lev->l.guest_dispatch = DS_guest_dispatch; |
lev->l.guest_epilogue = DS_guest_epilogue; |
lev->l.guest_activate = DS_guest_activate; |
lev->l.guest_insert = DS_guest_insert; |
lev->l.guest_extract = DS_guest_extract; |
lev->l.guest_endcycle = DS_guest_endcycle; |
lev->l.guest_end = DS_guest_end; |
lev->l.guest_sleep = DS_guest_sleep; |
lev->l.guest_delay = DS_guest_delay; |
/* fill the DS descriptor part */ |
for (i=0; i<MAX_PROC; i++) |
671,7 → 462,7 |
lev->period = per; |
qq_init(&lev->wait); |
iq_init(&lev->wait, &freedesc, 0); |
lev->activated = NIL; |
lev->U = (MAX_BANDWIDTH / per) * Cs; |
681,15 → 472,13 |
lev->flags = flags & 0x07; |
sys_atrunlevel(DS_dline_install,(void *) l, RUNLEVEL_INIT); |
return l; |
} |
bandwidth_t DS_usedbandwidth(LEVEL l) |
{ |
DS_level_des *lev = (DS_level_des *)(level_table[l]); |
if (lev->l.level_code == DS_LEVEL_CODE && |
lev->l.level_version == DS_LEVEL_VERSION) |
return lev->U; |
else |
return 0; |
return lev->U; |
} |
/shark/tags/rel_0_3/kernel/modules/cbs.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: cbs.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: cbs.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the aperiodic server CBS (Total Bandwidth Server) |
76,7 → 76,6 |
/*+ Status used in the level +*/ |
#define CBS_IDLE APER_STATUS_BASE /*+ waiting the activation +*/ |
#define CBS_ZOMBIE APER_STATUS_BASE+1 /*+ waiting the period end +*/ |
#define CBS_DELAY APER_STATUS_BASE+2 /*+ waiting the delay end +*/ |
/*+ task flags +*/ |
#define CBS_SAVE_ARRIVALS 1 |
173,26 → 172,9 |
job_task_default_model(job, lev->cbs_dline[p]); |
job_task_def_noexc(job); |
level_table[ lev->scheduling_level ]-> |
guest_create(lev->scheduling_level, p, (TASK_MODEL *)&job); |
level_table[ lev->scheduling_level ]-> |
guest_activate(lev->scheduling_level, p); |
private_insert(lev->scheduling_level, p, (TASK_MODEL *)&job); |
} |
static char *CBS_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case CBS_IDLE : return "CBS_Idle"; |
case CBS_ZOMBIE : return "CBS_Zombie"; |
case CBS_DELAY : return "CBS_Delay"; |
default : return "CBS_Unknown"; |
} |
} |
static void CBS_avail_time_check(CBS_level_des *lev, PID p) |
{ |
/* there is a while because if the wcet is << than the system tick |
253,20 → 235,6 |
} |
/*+ this function is called when a task finish his delay +*/ |
static void CBS_timer_delay(void *par) |
{ |
PID p = (PID) par; |
CBS_level_des *lev; |
lev = (CBS_level_des *)level_table[proc_table[p].task_level]; |
CBS_activation(lev,p,&proc_table[p].timespec_priority); |
event_need_reschedule(); |
} |
/*+ this function is called when a killed or ended task reach the |
period end +*/ |
static void CBS_timer_zombie(void *par) |
278,7 → 246,7 |
/* we finally put the task in the ready queue */ |
proc_table[p].status = FREE; |
q_insertfirst(p,&freedesc); |
iq_insertfirst(p,&freedesc); |
/* and free the allocated bandwidth */ |
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet; |
286,60 → 254,8 |
} |
static int CBS_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l)) { |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; |
if (s->met && s->period) |
return 0; |
} |
return -1; |
} |
static int CBS_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
return -1; |
} |
static char *onoff(int i) |
{ |
if (i) |
return "On "; |
else |
return "Off"; |
} |
static void CBS_level_status(LEVEL l) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
PID p; |
kern_printf("On-line guarantee : %s\n", |
onoff(lev->flags & CBS_ENABLE_GUARANTEE)); |
kern_printf("Used Bandwidth : %u/%u\n", |
lev->U, MAX_BANDWIDTH); |
for (p=0; p<MAX_PROC; p++) |
if (proc_table[p].task_level == l && proc_table[p].status != FREE ) |
kern_printf("Pid: %2d Name: %10s Period: %9ld Dline: %9ld.%6ld Stat: %s\n", |
p, |
proc_table[p].name, |
lev->period[p], |
lev->cbs_dline[p].tv_sec, |
lev->cbs_dline[p].tv_nsec/1000, |
CBS_status_to_a(proc_table[p].status)); |
} |
static PID CBS_level_scheduler(LEVEL l) |
{ |
/* the CBS don't schedule anything... |
it's an EDF level or similar that do it! */ |
return NIL; |
} |
/* The on-line guarantee is enabled only if the appropriate flag is set... */ |
static int CBS_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int CBS_public_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
356,14 → 272,18 |
return 0; |
} |
static int CBS_task_create(LEVEL l, PID p, TASK_MODEL *m) |
static int CBS_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
SOFT_TASK_MODEL *soft; |
/* if the CBS_task_create is called, then the pclass must be a |
valid pclass. */ |
SOFT_TASK_MODEL *soft = (SOFT_TASK_MODEL *)m; |
if (m->pclass != SOFT_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
soft = (SOFT_TASK_MODEL *)m; |
if (!(soft->met && soft->period)) return -1; |
soft = (SOFT_TASK_MODEL *)m; |
/* Enable wcet check */ |
proc_table[p].avail_time = soft->met; |
proc_table[p].wcet = soft->met; |
399,7 → 319,7 |
return 0; /* OK, also if the task cannot be guaranteed... */ |
} |
static void CBS_task_detach(LEVEL l, PID p) |
static void CBS_public_detach(LEVEL l, PID p) |
{ |
/* the CBS level doesn't introduce any dinamic allocated new field. |
we have only to reset the NO_GUARANTEE FIELD and decrement the allocated |
413,7 → 333,7 |
lev->U -= (MAX_BANDWIDTH / lev->period[p]) * proc_table[p].wcet; |
} |
static int CBS_task_eligible(LEVEL l, PID p) |
static int CBS_public_eligible(LEVEL l, PID p) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
JOB_TASK_MODEL job; |
428,7 → 348,7 |
if ( TIMESPEC_A_LT_B(&lev->cbs_dline[p], &schedule_time) ) { |
/* we kill the current activation */ |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level, p); |
private_extract(lev->scheduling_level, p); |
/* we modify the deadline ... */ |
TIMESPEC_ASSIGN(&lev->cbs_dline[p], &schedule_time); |
441,9 → 361,7 |
job_task_default_model(job, lev->cbs_dline[p]); |
job_task_def_noexc(job); |
level_table[ lev->scheduling_level ]-> |
guest_create(lev->scheduling_level, p, (TASK_MODEL *)&job); |
level_table[ lev->scheduling_level ]-> |
guest_activate(lev->scheduling_level, p); |
private_insert(lev->scheduling_level, p, (TASK_MODEL *)&job); |
return -1; |
} |
451,32 → 369,14 |
return 0; |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void CBS_task_dispatch(LEVEL l, PID p, int nostop) |
static void CBS_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
level_table[ lev->scheduling_level ]-> |
guest_dispatch(lev->scheduling_level,p,nostop); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
private_dispatch(lev->scheduling_level,p,nostop); |
} |
static void CBS_task_epilogue(LEVEL l, PID p) |
static void CBS_public_epilogue(LEVEL l, PID p) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
JOB_TASK_MODEL job; |
485,7 → 385,7 |
if ( proc_table[p].avail_time <= 0) { |
/* we kill the current activation */ |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level, p); |
private_extract(lev->scheduling_level, p); |
/* we modify the deadline according to rule 4 ... */ |
CBS_avail_time_check(lev, p); |
494,9 → 394,7 |
job_task_default_model(job, lev->cbs_dline[p]); |
job_task_def_noexc(job); |
level_table[ lev->scheduling_level ]-> |
guest_create(lev->scheduling_level, p, (TASK_MODEL *)&job); |
level_table[ lev->scheduling_level ]-> |
guest_activate(lev->scheduling_level, p); |
private_insert(lev->scheduling_level, p, (TASK_MODEL *)&job); |
// kern_printf("epil : dl %d per %d p %d |\n", |
// lev->cbs_dline[p].tv_nsec/1000,lev->period[p],p); |
505,12 → 403,13 |
/* the task has been preempted. it returns into the ready queue by |
calling the guest_epilogue... */ |
level_table[ lev->scheduling_level ]-> |
guest_epilogue(lev->scheduling_level,p); |
private_epilogue(lev->scheduling_level,p); |
} |
static void CBS_task_activate(LEVEL l, PID p) |
static void CBS_public_activate(LEVEL l, PID p) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
struct timespec t; |
/* save activation (only if needed... */ |
if (proc_table[p].status != SLEEP) { |
519,9 → 418,9 |
return; |
} |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
kern_gettime(&t); |
CBS_activation(lev, p, &proc_table[p].request_time); |
CBS_activation(lev, p, &t); |
/* Set the reactivation timer */ |
if (!(lev->flag[p] & CBS_APERIODIC)) |
530,7 → 429,7 |
the deadline may be != from actual_time + period |
(if we call the task_activate after a task_sleep, and the |
deadline was postponed a lot...) */ |
TIMESPEC_ASSIGN(&lev->reactivation_time[p], &proc_table[p].request_time); |
TIMESPEC_ASSIGN(&lev->reactivation_time[p], &t); |
ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]); |
// TIMESPEC_ASSIGN(&lev->reactivation_time[p], &lev->cbs_dline[p]); |
lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p], |
543,17 → 442,17 |
// kern_printf("act : %d %d |",lev->cbs_dline[p].tv_nsec/1000,p); |
} |
static void CBS_task_insert(LEVEL l, PID p) |
static void CBS_public_unblock(LEVEL l, PID p) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
struct timespec acttime; |
ll_gettime(TIME_EXACT, &acttime); |
kern_gettime(&acttime); |
CBS_activation(lev,p,&acttime); |
} |
static void CBS_task_extract(LEVEL l, PID p) |
static void CBS_public_block(LEVEL l, PID p) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
561,10 → 460,10 |
CBS_avail_time_check(lev, p); |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
} |
static void CBS_task_endcycle(LEVEL l, PID p) |
static int CBS_public_message(LEVEL l, PID p, void *m) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
573,24 → 472,27 |
if (lev->nact[p]) { |
/* continue!!!! */ |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
lev->nact[p]--; |
level_table[ lev->scheduling_level ]-> |
guest_epilogue(lev->scheduling_level,p); |
private_epilogue(lev->scheduling_level,p); |
} |
else { |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
if (lev->flag[p] & CBS_APERIODIC) |
proc_table[p].status = SLEEP; |
else /* the task is soft_periodic */ |
proc_table[p].status = CBS_IDLE; |
} |
} |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
return 0; |
} |
static void CBS_task_end(LEVEL l, PID p) |
static void CBS_public_end(LEVEL l, PID p) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
598,11 → 500,11 |
CBS_avail_time_check(lev, p); |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
/* we delete the reactivation timer */ |
if (!(lev->flag[p] & CBS_APERIODIC)) { |
event_delete(lev->reactivation_timer[p]); |
kern_event_delete(lev->reactivation_timer[p]); |
lev->reactivation_timer[p] = -1; |
} |
614,98 → 516,11 |
(void *)p); |
} |
static void CBS_task_sleep(LEVEL l, PID p) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
/* check if the wcet is finished... */ |
CBS_avail_time_check(lev, p); |
/* a task activation is finished, but we are using a JOB_TASK_MODEL |
that implements a single activation, so we have to call |
the guest_end, that representsa single activation... */ |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
/* we delete the reactivation timer */ |
if (!(lev->flag[p] & CBS_APERIODIC)) { |
event_delete(lev->reactivation_timer[p]); |
lev->reactivation_timer[p] = -1; |
} |
proc_table[p].status = SLEEP; |
/* the sleep forgets pending activations... */ |
lev->nact[p] = 0; |
} |
static void CBS_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
struct timespec wakeuptime; |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
/* check if the wcet is finished... */ |
CBS_avail_time_check(lev, p); |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
proc_table[p].status = CBS_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT, &wakeuptime); |
ADDUSEC2TIMESPEC(usdelay, &wakeuptime); |
/* the timespec_priority field is used to store the time at witch the delay |
timer raises */ |
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, &wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
CBS_timer_delay, |
(void *)p); |
} |
static int CBS_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void CBS_guest_detach(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_epilogue(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_activate(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_insert(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_extract(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_end(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void CBS_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
/* Registration functions */ |
/*+ Registration function: |
int flags the init flags ... see CBS.h +*/ |
void CBS_register_level(int flags, LEVEL master) |
LEVEL CBS_register_level(int flags, LEVEL master) |
{ |
LEVEL l; /* the level that we register */ |
CBS_level_des *lev; /* for readableness only */ |
714,58 → 529,28 |
printk("CBS_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(CBS_level_des)); |
printk(" alloco descrittore %d %d\n",l,(int)sizeof(CBS_level_des)); |
lev = (CBS_level_des *)level_table[l]; |
/* alloc the space needed for the CBS_level_des */ |
lev = (CBS_level_des *)kern_alloc(sizeof(CBS_level_des)); |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, CBS_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = CBS_LEVEL_CODE; |
lev->l.level_version = CBS_LEVEL_VERSION; |
lev->l.level_accept_task_model = CBS_level_accept_task_model; |
lev->l.level_accept_guest_model = CBS_level_accept_guest_model; |
lev->l.level_status = CBS_level_status; |
lev->l.level_scheduler = CBS_level_scheduler; |
if (flags & CBS_ENABLE_GUARANTEE) |
lev->l.level_guarantee = CBS_level_guarantee; |
lev->l.public_guarantee = CBS_public_guarantee; |
else |
lev->l.level_guarantee = NULL; |
lev->l.public_guarantee = NULL; |
lev->l.public_create = CBS_public_create; |
lev->l.public_detach = CBS_public_detach; |
lev->l.public_end = CBS_public_end; |
lev->l.public_eligible = CBS_public_eligible; |
lev->l.public_dispatch = CBS_public_dispatch; |
lev->l.public_epilogue = CBS_public_epilogue; |
lev->l.public_activate = CBS_public_activate; |
lev->l.public_unblock = CBS_public_unblock; |
lev->l.public_block = CBS_public_block; |
lev->l.public_message = CBS_public_message; |
lev->l.task_create = CBS_task_create; |
lev->l.task_detach = CBS_task_detach; |
lev->l.task_eligible = CBS_task_eligible; |
lev->l.task_dispatch = CBS_task_dispatch; |
lev->l.task_epilogue = CBS_task_epilogue; |
lev->l.task_activate = CBS_task_activate; |
lev->l.task_insert = CBS_task_insert; |
lev->l.task_extract = CBS_task_extract; |
lev->l.task_endcycle = CBS_task_endcycle; |
lev->l.task_end = CBS_task_end; |
lev->l.task_sleep = CBS_task_sleep; |
lev->l.task_delay = CBS_task_delay; |
lev->l.guest_create = CBS_guest_create; |
lev->l.guest_detach = CBS_guest_detach; |
lev->l.guest_dispatch = CBS_guest_dispatch; |
lev->l.guest_epilogue = CBS_guest_epilogue; |
lev->l.guest_activate = CBS_guest_activate; |
lev->l.guest_insert = CBS_guest_insert; |
lev->l.guest_extract = CBS_guest_extract; |
lev->l.guest_endcycle = CBS_guest_endcycle; |
lev->l.guest_end = CBS_guest_end; |
lev->l.guest_sleep = CBS_guest_sleep; |
lev->l.guest_delay = CBS_guest_delay; |
/* fill the CBS descriptor part */ |
for (i=0; i<MAX_PROC; i++) { |
NULL_TIMESPEC(&lev->cbs_dline[i]); |
782,16 → 567,15 |
lev->scheduling_level = master; |
lev->flags = flags & 0x01; |
return l; |
} |
bandwidth_t CBS_usedbandwidth(LEVEL l) |
{ |
CBS_level_des *lev = (CBS_level_des *)(level_table[l]); |
if (lev->l.level_code == CBS_LEVEL_CODE && |
lev->l.level_version == CBS_LEVEL_VERSION) |
return lev->U; |
else |
return 0; |
return lev->U; |
} |
int CBS_get_nact(LEVEL l, PID p) |
/shark/tags/rel_0_3/kernel/modules/pi.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: pi.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: pi.c,v 1.2 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
Priority Inhertitance protocol. see pi.h for more details... |
56,7 → 56,6 |
#include <ll/ll.h> |
#include <ll/string.h> |
#include <ll/stdio.h> |
#include <modules/codes.h> |
#include <kernel/const.h> |
#include <sys/types.h> |
#include <kernel/descr.h> |
83,6 → 82,7 |
#if 0 |
/*+ print resource protocol statistics...+*/ |
static void PI_resource_status(RLEVEL r) |
{ |
94,19 → 94,14 |
kern_printf("%-4d", m->nlocked[i]); |
} |
} |
#endif |
static int PI_level_accept_resource_model(RLEVEL l, RES_MODEL *r) |
static int PI_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
/* priority inheritance works with all tasks without Resource parameters */ |
return -1; |
} |
static void PI_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
/* never called!!! */ |
} |
static void PI_res_detach(RLEVEL l, PID p) |
{ |
PI_mutex_resource_des *m = (PI_mutex_resource_des *)(resource_table[l]); |
115,18 → 110,13 |
kern_raise(XMUTEX_OWNER_KILLED, p); |
} |
static int PI_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a) |
{ |
if (a->mclass == PI_MCLASS || a->mclass == (PI_MCLASS | l) ) |
return 0; |
else |
return -1; |
} |
static int PI_init(RLEVEL l, mutex_t *m, const mutexattr_t *a) |
{ |
PI_mutex_t *p; |
if (a->mclass != PI_MCLASS) |
return -1; |
p = (PI_mutex_t *) kern_alloc(sizeof(PI_mutex_t)); |
/* control if there is enough memory; no control on init on a |
299,7 → 289,7 |
return 0; |
} |
void PI_register_module(void) |
RLEVEL PI_register_module(void) |
{ |
RLEVEL l; /* the level that we register */ |
PI_mutex_resource_des *m; /* for readableness only */ |
317,20 → 307,11 |
resource_table[l] = (resource_des *)m; |
/* fill the resource_des descriptor */ |
strncpy(m->m.r.res_name, PI_MODULENAME, MAX_MODULENAME); |
m->m.r.res_code = PI_MODULE_CODE; |
m->m.r.res_version = PI_MODULE_VERSION; |
m->m.r.rtype = MUTEX_RTYPE; |
m->m.r.resource_status = PI_resource_status; |
m->m.r.level_accept_resource_model = PI_level_accept_resource_model; |
m->m.r.res_register = PI_res_register; |
m->m.r.res_detach = PI_res_detach; |
/* fill the mutex_resource_des descriptor */ |
m->m.level_accept_mutexattr = PI_level_accept_mutexattr; |
m->m.init = PI_init; |
m->m.destroy = PI_destroy; |
m->m.lock = PI_lock; |
342,5 → 323,7 |
m->nlocked[i] = 0; |
m->blocked[i] = NIL; |
} |
return l; |
} |
/shark/tags/rel_0_3/kernel/modules/trcfixed.c |
---|
33,6 → 33,13 |
#include <fcntl.h> |
#include <limits.h> |
/* this file implement a fixed queue, that is simply an array that |
is filled with the events until it is full. After that, all the other |
events are discarded. */ |
typedef struct TAGfixed_queue_t { |
int size; |
int index; |
39,9 → 46,13 |
char *filename; |
int uniq; |
trc_event_t table[0]; |
trc_event_t table[0]; |
/* Yes, 0!... the elements are allocated |
in a dirty way into the kern_alloc into fixed_create */ |
} fixed_queue_t; |
/* This function simply return an event to fill (only if the fixed table |
is not yet full) */ |
static trc_event_t *fixed_get(fixed_queue_t *queue) |
{ |
if (queue->index>=queue->size) return NULL; |
48,6 → 59,8 |
return &queue->table[queue->index++]; |
} |
/* since get returns the correct event address, |
the post function does nothing... */ |
static int fixed_post(fixed_queue_t *queue) |
{ |
return 0; |
60,6 → 73,7 |
{ |
fixed_queue_t *ptr; |
/* initialize the default arguments for the fixed queue */ |
if (!once) { |
/* well... this func is called when the system is not running! */ |
once=1; |
67,11 → 81,12 |
} |
if (args==NULL) args=&defaultargs; |
/* allocate the fixed queue data structure plus the array of events */ |
ptr=(fixed_queue_t*)kern_alloc(sizeof(fixed_queue_t)+ |
sizeof(trc_event_t)*(args->size+1)); |
if (ptr==NULL) return -1; |
/* set the current queue pointers and data */ |
queue->get=(trc_event_t*(*)(void*))fixed_get; |
queue->post=(int(*)(void*))fixed_post; |
queue->data=ptr; |
92,9 → 107,6 |
if (queue->filename==NULL) trc_create_name("fix",queue->uniq,pathname); |
else trc_create_name(queue->filename,0,pathname); |
//sys_status(SCHED_STATUS); |
//task_delay(250000); |
h=open("/TEMP/FIX1",O_CREAT|O_TRUNC|O_WRONLY); |
if (h!=-1) { |
write(h,queue->table,queue->index*sizeof(trc_event_t)); |
/shark/tags/rel_0_3/kernel/modules/nopm.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: nopm.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: nopm.c,v 1.3 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
See modules/nopm.h. |
58,7 → 58,6 |
#include <ll/string.h> |
#include <kernel/const.h> |
#include <sys/types.h> |
#include <modules/codes.h> |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
73,7 → 72,7 |
mutex_t structure */ |
typedef struct { |
PID owner; |
QQUEUE blocked; |
IQUEUE blocked; |
int counter; |
} NOPM_mutex_t; |
108,12 → 107,12 |
kern_printf("----------------------\n"); |
for(i=0;i<index;i++) { |
ptr=table[i]->opt; |
if (ptr->blocked.first!=NIL) { |
if (!iq_isempty(&ptr->blocked)) { |
kern_printf("%i blocks on 0x%p: ",ptr->owner,table[i]); |
j=ptr->blocked.first; |
j=iq_query_first(&ptr->blocked); |
while (j!=NIL) { |
kern_printf("%i ",(int)j); |
j=proc_table[j].next; |
j=iq_query_next(j, &ptr->blocked); |
} |
kern_printf("\n"); |
} else { |
138,40 → 137,23 |
#define NOPM_WAIT LIB_STATUS_BASE |
/*+ print resource protocol statistics...+*/ |
static void NOPM_resource_status(RLEVEL r) |
{ |
kern_printf("No status for NOPM module\n"); |
} |
static int NOPM_level_accept_resource_model(RLEVEL l, RES_MODEL *r) |
static int NOPM_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
/* priority inheritance works with all tasks without Resource parameters */ |
return -1; |
} |
static void NOPM_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
/* never called!!! */ |
} |
static void NOPM_res_detach(RLEVEL l, PID p) |
{ |
} |
static int NOPM_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a) |
{ |
if (a->mclass == NOPM_MCLASS || a->mclass == (NOPM_MCLASS | l) ) |
return 0; |
else |
return -1; |
} |
static int NOPM_init(RLEVEL l, mutex_t *m, const mutexattr_t *a) |
{ |
NOPM_mutex_t *p; |
if (a->mclass != NOPM_MCLASS) |
return -1; |
p = (NOPM_mutex_t *) kern_alloc(sizeof(NOPM_mutex_t)); |
/* control if there is enough memory; no control on init on a |
181,7 → 163,7 |
return (ENOMEM); |
p->owner = NIL; |
qq_init(&p->blocked); |
iq_init(&p->blocked, &freedesc, 0); |
p->counter=0; |
m->mutexlevel = l; |
234,27 → 216,16 |
if (p->owner != NIL) { /* We must block exec task */ |
LEVEL l; /* for readableness only */ |
TIME tx; /* a dummy TIME for timespec operations */ |
struct timespec ty; /* a dummy timespec for timespec operations */ |
proc_table[exec_shadow].context = kern_context_save(); |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
/* we insert the task in the semaphore queue */ |
proc_table[exec_shadow].status = NOPM_WAIT; |
qq_insertlast(exec_shadow,&p->blocked); |
iq_insertlast(exec_shadow,&p->blocked); |
/* and finally we reschedule */ |
exec = exec_shadow = -1; |
327,13 → 298,13 |
/* the mutex is mine, pop the firsttask to extract */ |
for (;;) { |
e = qq_getfirst(&p->blocked); |
e = iq_getfirst(&p->blocked); |
if (e == NIL) { |
p->owner = NIL; |
break; |
} else if (proc_table[e].status == NOPM_WAIT) { |
l = proc_table[e].task_level; |
level_table[l]->task_insert(l,e); |
level_table[l]->public_unblock(l,e); |
p->counter++; |
break; |
} |
348,7 → 319,7 |
return 0; |
} |
void NOPM_register_module(void) |
RLEVEL NOPM_register_module(void) |
{ |
RLEVEL l; /* the level that we register */ |
NOPM_mutex_resource_des *m; /* for readableness only */ |
365,20 → 336,11 |
resource_table[l] = (resource_des *)m; |
/* fill the resource_des descriptor */ |
strncpy(m->m.r.res_name, NOPM_MODULENAME, MAX_MODULENAME); |
m->m.r.res_code = NOPM_MODULE_CODE; |
m->m.r.res_version = NOPM_MODULE_VERSION; |
m->m.r.rtype = MUTEX_RTYPE; |
m->m.r.resource_status = NOPM_resource_status; |
m->m.r.level_accept_resource_model = NOPM_level_accept_resource_model; |
m->m.r.res_register = NOPM_res_register; |
m->m.r.res_detach = NOPM_res_detach; |
/* fill the mutex_resource_des descriptor */ |
m->m.level_accept_mutexattr = NOPM_level_accept_mutexattr; |
m->m.init = NOPM_init; |
m->m.destroy = NOPM_destroy; |
m->m.lock = NOPM_lock; |
385,5 → 347,6 |
m->m.trylock = NOPM_trylock; |
m->m.unlock = NOPM_unlock; |
return l; |
} |
/shark/tags/rel_0_3/kernel/modules/bd_pscan.c |
---|
38,11 → 38,11 |
*/ |
/* |
* CVS : $Id: bd_pscan.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
* CVS : $Id: bd_pscan.c,v 1.2 2003-01-07 17:07:50 pj Exp $ |
* |
* File: $File$ |
* Revision: $Revision: 1.1.1.1 $ |
* Last update: $Date: 2002-03-29 14:12:52 $ |
* Revision: $Revision: 1.2 $ |
* Last update: $Date: 2003-01-07 17:07:50 $ |
*/ |
#include <modules/bd_pscan.h> |
51,7 → 51,6 |
#include <ll/string.h> |
#include <ll/stdio.h> |
#include <kernel/const.h> |
#include <modules/codes.h> |
#include <sys/types.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
68,21 → 67,21 |
int priority[MAX_PROC]; |
} bd_pscan_resource_des; |
static int res_level_accept_resource_model(RLEVEL l, RES_MODEL *r) |
static int res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
assertk(mylevel==l); |
if (r->rclass==BDPSCAN_RCLASS||r->rclass==(BDPSCAN_RCLASS|l)) |
return 0; |
else |
bd_pscan_resource_des *m=(bd_pscan_resource_des*)(resource_table[l]); |
BDPSCAN_RES_MODEL *rm; |
if (r->rclass!=BDEDF_RCLASS) |
return -1; |
} |
static void res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
bd_pscan_resource_des *m=(bd_pscan_resource_des*)(resource_table[l]); |
BDPSCAN_RES_MODEL *rm=(BDPSCAN_RES_MODEL*)r; |
if (r->level && r->level !=l) |
return -1; |
rm=(BDPSCAN_RES_MODEL*)r; |
assertk(mylevel==l); |
m->priority[p]=rm->priority; |
return 0; |
} |
static void res_detach(RLEVEL l, PID p) |
92,10 → 91,7 |
m->priority[p]=LOWESTPRIORITY; |
} |
static void res_resource_status(void) |
{} |
void BD_PSCAN_register_module(void) |
RLEVEL BD_PSCAN_register_module(void) |
{ |
RLEVEL l; |
bd_pscan_resource_des *m; |
111,12 → 107,7 |
resource_table[l]=(resource_des*)m; |
/* fill the resource_des descriptor */ |
strcpy(m->rd.res_name,BDPSCAN_MODULENAME); |
m->rd.res_code=BDPSCAN_MODULE_CODE; |
m->rd.res_version=BDPSCAN_MODULE_VERSION; |
m->rd.rtype=DEFAULT_RTYPE; |
m->rd.resource_status=res_resource_status; |
m->rd.level_accept_resource_model=res_level_accept_resource_model; |
m->rd.res_register=res_register; |
m->rd.res_detach=res_detach; |
124,6 → 115,8 |
assertk(mylevel==-1); |
mylevel=l; |
return l; |
} |
int bd_pscan_getpriority(void) |
/shark/tags/rel_0_3/kernel/modules/rm.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: rm.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: rm.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the scheduling module RM (Rate Monotonic) |
41,7 → 41,7 |
**/ |
/* |
* Copyright (C) 2000 Paolo Gai |
* Copyright (C) 2000,2002 Paolo Gai |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
71,7 → 71,6 |
/*+ Status used in the level +*/ |
#define RM_READY MODULE_STATUS_BASE /*+ - Ready status +*/ |
#define RM_DELAY MODULE_STATUS_BASE+1 /*+ - Delay status +*/ |
#define RM_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/ |
#define RM_WAIT MODULE_STATUS_BASE+3 /*+ to wait the deadline +*/ |
#define RM_IDLE MODULE_STATUS_BASE+4 /*+ to wait the deadline +*/ |
94,7 → 93,7 |
/*+ used to manage the JOB_TASK_MODEL and the |
periodicity +*/ |
QUEUE ready; /*+ the ready queue +*/ |
IQUEUE ready; /*+ the ready queue +*/ |
int flags; /*+ the init flags... +*/ |
103,28 → 102,12 |
} RM_level_des; |
static char *RM_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case RM_READY : return "RM_Ready"; |
case RM_DELAY : return "RM_Delay"; |
case RM_WCET_VIOLATED: return "RM_Wcet_Violated"; |
case RM_WAIT : return "RM_Sporadic_Wait"; |
case RM_IDLE : return "RM_Idle"; |
case RM_ZOMBIE : return "RM_Zombie"; |
default : return "RM_Unknown"; |
} |
} |
static void RM_timer_deadline(void *par) |
{ |
PID p = (PID) par; |
RM_level_des *lev; |
struct timespec *temp; |
lev = (RM_level_des *)level_table[proc_table[p].task_level]; |
switch (proc_table[p].status) { |
131,7 → 114,7 |
case RM_ZOMBIE: |
/* we finally put the task in the ready queue */ |
proc_table[p].status = FREE; |
q_insertfirst(p,&freedesc); |
iq_insertfirst(p,&freedesc); |
/* and free the allocated bandwidth */ |
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet; |
break; |
140,12 → 123,11 |
/* tracer stuff */ |
trc_logevent(TRC_INTACTIVATION,&p); |
/* similar to RM_task_activate */ |
TIMESPEC_ASSIGN(&proc_table[p].request_time, |
&proc_table[p].timespec_priority); |
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority); |
temp = iq_query_timespec(p, &lev->ready); |
ADDUSEC2TIMESPEC(lev->period[p], temp); |
proc_table[p].status = RM_READY; |
q_insert(p,&lev->ready); |
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority, |
iq_priority_insert(p,&lev->ready); |
lev->deadline_timer[p] = kern_event_post(temp, |
RM_timer_deadline, |
(void *)p); |
//printk("(d%d idle priority set to %d)",p,proc_table[p].priority ); |
173,112 → 155,16 |
kern_raise(XDEADLINE_MISS,p); |
} |
/*+ this function is called when a task finish his delay +*/ |
static void RM_timer_delay(void *par) |
{ |
PID p = (PID) par; |
RM_level_des *lev; |
lev = (RM_level_des *)level_table[proc_table[p].task_level]; |
proc_table[p].status = RM_READY; |
q_insert(p,&lev->ready); |
proc_table[p].delay_timer = NIL; /* Paranoia */ |
event_need_reschedule(); |
} |
static int RM_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l)) { |
HARD_TASK_MODEL *h = (HARD_TASK_MODEL *)m; |
if (h->wcet && h->mit) |
return 0; |
} |
return -1; |
} |
static int RM_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == JOB_PCLASS || m->pclass == (JOB_PCLASS | l)) |
return 0; |
else |
return -1; |
} |
static char *onoff(int i) |
{ |
if (i) |
return "On "; |
else |
return "Off"; |
} |
static void RM_level_status(LEVEL l) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
PID p = lev->ready; |
kern_printf("Wcet Check : %s\n", |
onoff(lev->flags & RM_ENABLE_WCET_CHECK)); |
kern_printf("On-line guarantee : %s\n", |
onoff(lev->flags & RM_ENABLE_GUARANTEE)); |
kern_printf("Used Bandwidth : %u/%u\n", |
lev->U, MAX_BANDWIDTH); |
while (p != NIL) { |
if ((proc_table[p].pclass) == JOB_PCLASS) |
kern_printf("Pid: %2d (GUEST)\n", p); |
else |
kern_printf("Pid: %2d Name: %10s %s: %9ld Dline: %9ld.%6ld Stat: %s\n", |
p, |
proc_table[p].name, |
lev->flag[p] & RM_FLAG_SPORADIC ? "MinITime" : "Period ", |
lev->period[p], |
proc_table[p].timespec_priority.tv_sec, |
proc_table[p].timespec_priority.tv_nsec/1000, |
RM_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
} |
for (p=0; p<MAX_PROC; p++) |
if (proc_table[p].task_level == l && proc_table[p].status != RM_READY |
&& proc_table[p].status != FREE ) |
kern_printf("Pid: %2d Name: %10s %s: %9ld Dline: %9ld.%6ld Stat: %s\n", |
p, |
proc_table[p].name, |
lev->flag[p] & RM_FLAG_SPORADIC ? "MinITime" : "Period ", |
lev->period[p], |
proc_table[p].timespec_priority.tv_sec, |
proc_table[p].timespec_priority.tv_nsec/1000, |
RM_status_to_a(proc_table[p].status)); |
} |
/* The scheduler only gets the first task in the queue */ |
static PID RM_level_scheduler(LEVEL l) |
static PID RM_public_scheduler(LEVEL l) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* { // print 4 dbg the ready queue |
PID p= lev->ready; |
kern_printf("(s"); |
while (p != NIL) { |
kern_printf("%d ",p); |
p = proc_table[p].next; |
} |
kern_printf(") "); |
} |
*/ |
return (PID)lev->ready; |
return iq_query_first(&lev->ready); |
} |
/* The on-line guarantee is enabled only if the appropriate flag is set... */ |
static int RM_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int RM_public_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
296,16 → 182,19 |
} |
static int RM_task_create(LEVEL l, PID p, TASK_MODEL *m) |
static int RM_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* if the RM_task_create is called, then the pclass must be a |
valid pclass. */ |
HARD_TASK_MODEL *h; |
HARD_TASK_MODEL *h = (HARD_TASK_MODEL *)m; |
if (m->pclass != HARD_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
h = (HARD_TASK_MODEL *)m; |
if (!h->wcet || !h->mit) return -1; |
/* now we know that m is a valid model */ |
proc_table[p].priority = lev->period[p] = h->mit; |
*iq_query_priority(p, &lev->ready) = lev->period[p] = h->mit; |
if (h->periodicity == APERIODIC) |
lev->flag[p] = RM_FLAG_SPORADIC; |
347,7 → 236,7 |
return 0; /* OK, also if the task cannot be guaranteed... */ |
} |
static void RM_task_detach(LEVEL l, PID p) |
static void RM_public_detach(LEVEL l, PID p) |
{ |
/* the RM level doesn't introduce any dinamic allocated new field. |
we have only to reset the NO_GUARANTEE FIELD and decrement the allocated |
361,21 → 250,8 |
lev->U -= (MAX_BANDWIDTH / lev->period[p]) * proc_table[p].wcet; |
} |
static int RM_task_eligible(LEVEL l, PID p) |
static void RM_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void RM_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
// kern_printf("(disp %d)",p); |
383,20 → 259,10 |
/* the task state is set EXE by the scheduler() |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
q_extract(p, &lev->ready); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
iq_extract(p, &lev->ready); |
} |
static void RM_task_epilogue(LEVEL l, PID p) |
static void RM_public_epilogue(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
410,14 → 276,15 |
} |
else { |
/* the task has been preempted. it returns into the ready queue... */ |
q_insert(p,&lev->ready); |
iq_priority_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
} |
} |
static void RM_task_activate(LEVEL l, PID p) |
static void RM_public_activate(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
struct timespec *temp; |
if (proc_table[p].status == RM_WAIT) { |
kern_raise(XACTIVATION,p); |
432,35 → 299,33 |
/* see also RM_timer_deadline */ |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
temp = iq_query_timespec(p, &lev->ready); |
kern_gettime(temp); |
ADDUSEC2TIMESPEC(lev->period[p], temp); |
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, |
&proc_table[p].request_time); |
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority); |
/* Insert task in the correct position */ |
proc_table[p].status = RM_READY; |
q_insert(p,&lev->ready); |
iq_priority_insert(p,&lev->ready); |
/* Set the deadline timer */ |
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority, |
lev->deadline_timer[p] = kern_event_post(temp, |
RM_timer_deadline, |
(void *)p); |
} |
static void RM_task_insert(LEVEL l, PID p) |
static void RM_public_unblock(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* Similar to RM_task_activate, but we don't check in what state |
the task is and we don't set the request_time*/ |
/* Similar to RM_task_activate, |
but we don't check in what state the task is */ |
/* Insert task in the correct position */ |
proc_table[p].status = RM_READY; |
q_insert(p,&lev->ready); |
iq_priority_insert(p,&lev->ready); |
} |
static void RM_task_extract(LEVEL l, PID p) |
static void RM_public_block(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extract it from the ready queue at the dispatch time. |
473,7 → 338,7 |
*/ |
} |
static void RM_task_endcycle(LEVEL l, PID p) |
static int RM_public_message(LEVEL l, PID p, void *m) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
487,14 → 352,17 |
if (lev->flags & RM_ENABLE_WCET_CHECK) |
proc_table[p].avail_time = proc_table[p].wcet; |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
/* when the deadline timer fire, it recognize the situation and set |
correctly all the stuffs (like reactivation, request_time, etc... ) */ |
correctly all the stuffs (like reactivation, sleep, etc... ) */ |
return 0; |
} |
static void RM_task_end(LEVEL l, PID p) |
static void RM_public_end(LEVEL l, PID p) |
{ |
// RM_level_des *lev = (RM_level_des *)(level_table[l]); |
proc_table[p].status = RM_ZOMBIE; |
/* When the deadline timer fire, it put the task descriptor in |
501,183 → 369,81 |
the free queue, and free the allocated bandwidth... */ |
} |
static void RM_task_sleep(LEVEL l, PID p) |
static void RM_private_insert(LEVEL l, PID p, TASK_MODEL *m) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
JOB_TASK_MODEL *job; |
/* the task has terminated his job before it consume the wcet. All OK! */ |
proc_table[p].status = RM_WAIT; |
if (m->pclass != JOB_PCLASS || (m->level != 0 && m->level != l) ) { |
kern_raise(XINVALID_TASK, p); |
return; |
} |
/* we reset the capacity counters... */ |
if (lev->flags & RM_ENABLE_WCET_CHECK) |
proc_table[p].avail_time = proc_table[p].wcet; |
job = (JOB_TASK_MODEL *)m; |
/* when the deadline timer fire, it recognize the situation and set |
correctly the task state to sleep... */ |
} |
static void RM_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
struct timespec wakeuptime; |
// RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* equal to RM_task_endcycle */ |
proc_table[p].status = RM_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT, &wakeuptime); |
ADDUSEC2TIMESPEC(usdelay, &wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
RM_timer_delay, |
(void *)p); |
} |
/* Guest Functions |
These functions manages a JOB_TASK_MODEL, that is used to put |
a guest task in the RM ready queue. */ |
static int RM_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
JOB_TASK_MODEL *job = (JOB_TASK_MODEL *)m; |
/* if the RM_guest_create is called, then the pclass must be a |
valid pclass. */ |
*iq_query_timespec(p,&lev->ready) = job->deadline; |
*iq_query_priority(p, &lev->ready) = lev->period[p] = job->period; |
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, &job->deadline); |
lev->deadline_timer[p] = -1; |
/* Insert task in the correct position */ |
iq_priority_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
if (job->noraiseexc) |
lev->flag[p] = RM_FLAG_NORAISEEXC; |
else |
else { |
lev->flag[p] = 0; |
proc_table[p].priority = lev->period[p] = job->period; |
/* there is no bandwidth guarantee at this level, it is performed |
by the level that inserts guest tasks... */ |
return 0; /* OK, also if the task cannot be guaranteed... */ |
lev->deadline_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready), |
RM_timer_guest_deadline, |
(void *)p); |
} |
} |
static void RM_guest_detach(LEVEL l, PID p) |
static void RM_private_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the RM level doesn't introduce any dinamic allocated new field. |
No guarantee is performed on guest tasks... so we don't have to reset |
the NO_GUARANTEE FIELD */ |
} |
static void RM_guest_dispatch(LEVEL l, PID p, int nostop) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* the task state is set to EXE by the scheduler() |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
q_extract(p, &lev->ready); |
iq_extract(p, &lev->ready); |
} |
static void RM_guest_epilogue(LEVEL l, PID p) |
static void RM_private_epilogue(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* the task has been preempted. it returns into the ready queue... */ |
q_insert(p,&lev->ready); |
iq_priority_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
} |
static void RM_guest_activate(LEVEL l, PID p) |
static void RM_private_extract(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* Insert task in the correct position */ |
q_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
/* Set the deadline timer */ |
if (!(lev->flag[p] & RM_FLAG_NORAISEEXC)) |
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority, |
RM_timer_guest_deadline, |
(void *)p); |
} |
static void RM_guest_insert(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* Insert task in the correct position */ |
q_insert(p,&lev->ready); |
proc_table[p].status = RM_READY; |
} |
static void RM_guest_extract(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extract it from the ready queue at the dispatch time. |
. the state of the task is set by the calling function |
. the deadline must remain... |
So, we do nothing!!! |
*/ |
} |
static void RM_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RM_guest_end(LEVEL l, PID p) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
//kern_printf("RM_guest_end: dline timer %d\n",lev->deadline_timer[p]); |
if (proc_table[p].status == RM_READY) |
{ |
q_extract(p, &lev->ready); |
iq_extract(p, &lev->ready); |
//kern_printf("(g_end rdy extr)"); |
} |
else if (proc_table[p].status == RM_DELAY) { |
event_delete(proc_table[p].delay_timer); |
proc_table[p].delay_timer = NIL; /* paranoia */ |
} |
/* we remove the deadline timer, because the slice is finished */ |
if (lev->deadline_timer[p] != NIL) { |
// kern_printf("RM_guest_end: dline timer %d\n",lev->deadline_timer[p]); |
event_delete(lev->deadline_timer[p]); |
kern_event_delete(lev->deadline_timer[p]); |
lev->deadline_timer[p] = NIL; |
} |
} |
static void RM_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RM_guest_delay(LEVEL l, PID p, TIME usdelay) |
{ |
struct timespec wakeuptime; |
// RM_level_des *lev = (RM_level_des *)(level_table[l]); |
/* equal to RM_task_endcycle */ |
proc_table[p].status = RM_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT, &wakeuptime); |
ADDUSEC2TIMESPEC(usdelay, &wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
RM_timer_delay, |
(void *)p); |
} |
/* Registration functions */ |
/*+ Registration function: |
int flags the init flags ... see rm.h +*/ |
void RM_register_level(int flags) |
LEVEL RM_register_level(int flags) |
{ |
LEVEL l; /* the level that we register */ |
RM_level_des *lev; /* for readableness only */ |
686,56 → 452,34 |
printk("RM_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(RM_level_des)); |
/* alloc the space needed for the RM_level_des */ |
lev = (RM_level_des *)kern_alloc(sizeof(RM_level_des)); |
lev = (RM_level_des *)level_table[l]; |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, RM_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = RM_LEVEL_CODE; |
lev->l.level_version = RM_LEVEL_VERSION; |
lev->l.private_insert = RM_private_insert; |
lev->l.private_extract = RM_private_extract; |
lev->l.private_dispatch = RM_private_dispatch; |
lev->l.private_epilogue = RM_private_epilogue; |
lev->l.level_accept_task_model = RM_level_accept_task_model; |
lev->l.level_accept_guest_model = RM_level_accept_guest_model; |
lev->l.level_status = RM_level_status; |
lev->l.level_scheduler = RM_level_scheduler; |
lev->l.public_scheduler = RM_public_scheduler; |
if (flags & RM_ENABLE_GUARANTEE) |
lev->l.level_guarantee = RM_level_guarantee; |
lev->l.public_guarantee = RM_public_guarantee; |
else |
lev->l.level_guarantee = NULL; |
lev->l.public_guarantee = NULL; |
lev->l.task_create = RM_task_create; |
lev->l.task_detach = RM_task_detach; |
lev->l.task_eligible = RM_task_eligible; |
lev->l.task_dispatch = RM_task_dispatch; |
lev->l.task_epilogue = RM_task_epilogue; |
lev->l.task_activate = RM_task_activate; |
lev->l.task_insert = RM_task_insert; |
lev->l.task_extract = RM_task_extract; |
lev->l.task_endcycle = RM_task_endcycle; |
lev->l.task_end = RM_task_end; |
lev->l.task_sleep = RM_task_sleep; |
lev->l.task_delay = RM_task_delay; |
lev->l.public_create = RM_public_create; |
lev->l.public_detach = RM_public_detach; |
lev->l.public_end = RM_public_end; |
lev->l.public_dispatch = RM_public_dispatch; |
lev->l.public_epilogue = RM_public_epilogue; |
lev->l.public_activate = RM_public_activate; |
lev->l.public_unblock = RM_public_unblock; |
lev->l.public_block = RM_public_block; |
lev->l.public_message = RM_public_message; |
lev->l.guest_create = RM_guest_create; |
lev->l.guest_detach = RM_guest_detach; |
lev->l.guest_dispatch = RM_guest_dispatch; |
lev->l.guest_epilogue = RM_guest_epilogue; |
lev->l.guest_activate = RM_guest_activate; |
lev->l.guest_insert = RM_guest_insert; |
lev->l.guest_extract = RM_guest_extract; |
lev->l.guest_endcycle = RM_guest_endcycle; |
lev->l.guest_end = RM_guest_end; |
lev->l.guest_sleep = RM_guest_sleep; |
lev->l.guest_delay = RM_guest_delay; |
/* fill the RM descriptor part */ |
for(i=0; i<MAX_PROC; i++) { |
lev->period[i] = 0; |
743,18 → 487,17 |
lev->flag[i] = 0; |
} |
lev->ready = NIL; |
iq_init(&lev->ready, &freedesc, 0); |
lev->flags = flags & 0x07; |
lev->U = 0; |
return l; |
} |
bandwidth_t RM_usedbandwidth(LEVEL l) |
{ |
RM_level_des *lev = (RM_level_des *)(level_table[l]); |
if (lev->l.level_code == RM_LEVEL_CODE && |
lev->l.level_version == RM_LEVEL_VERSION) |
return lev->U; |
else |
return 0; |
return lev->U; |
} |
/shark/tags/rel_0_3/kernel/modules/rrsoft.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: rrsoft.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: rrsoft.c,v 1.4 2003-01-07 17:07:51 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:51 $ |
------------ |
This file contains the scheduling module RRSOFT (Round Robin) |
60,10 → 60,10 |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
/*+ Status used in the level +*/ |
#define RRSOFT_READY MODULE_STATUS_BASE |
#define RRSOFT_DELAY MODULE_STATUS_BASE+1 |
#define RRSOFT_IDLE MODULE_STATUS_BASE+2 |
/*+ the level redefinition for the Round Robin level +*/ |
72,7 → 72,7 |
int nact[MAX_PROC]; /*+ number of pending activations +*/ |
QQUEUE ready; /*+ the ready queue +*/ |
IQUEUE ready; /*+ the ready queue +*/ |
int slice; /*+ the level's time slice +*/ |
93,20 → 93,6 |
} RRSOFT_level_des; |
static char *RRSOFT_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case RRSOFT_READY: return "RRSOFT_Ready"; |
case RRSOFT_DELAY: return "RRSOFT_Delay"; |
case RRSOFT_IDLE : return "RRSOFT_Idle"; |
default : return "RRSOFT_Unknown"; |
} |
} |
/* this is the periodic reactivation of the task... it is posted only |
if the task is a periodic task */ |
static void RRSOFT_timer_reactivate(void *par) |
121,7 → 107,7 |
/* the task has finished the current activation and must be |
reactivated */ |
proc_table[p].status = RRSOFT_READY; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
event_need_reschedule(); |
} |
139,72 → 125,11 |
// trc_logevent(TRC_INTACTIVATION,&p); |
} |
/*+ this function is called when a task finish his delay +*/ |
static void RRSOFT_timer_delay(void *par) |
{ |
PID p = (PID) par; |
RRSOFT_level_des *lev; |
lev = (RRSOFT_level_des *)level_table[proc_table[p].task_level]; |
proc_table[p].status = RRSOFT_READY; |
qq_insertlast(p,&lev->ready); |
proc_table[p].delay_timer = NIL; /* Paranoia */ |
// kern_printf(" DELAY TIMER %d ", p); |
event_need_reschedule(); |
} |
static int RRSOFT_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
if ((m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l)) && lev->models & RRSOFT_ONLY_NRT) |
return 0; |
else if ((m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l)) && lev->models & RRSOFT_ONLY_SOFT) |
return 0; |
else if ((m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l)) && lev->models & RRSOFT_ONLY_HARD) |
return 0; |
else |
return -1; |
} |
static int RRSOFT_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
return -1; |
} |
static void RRSOFT_level_status(LEVEL l) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
PID p = qq_queryfirst(&lev->ready); |
kern_printf("Slice: %d \n", lev->slice); |
while (p != NIL) { |
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name, |
RRSOFT_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
} |
for (p=0; p<MAX_PROC; p++) |
if (proc_table[p].task_level == l && proc_table[p].status != RRSOFT_READY |
&& proc_table[p].status != FREE ) |
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name, |
RRSOFT_status_to_a(proc_table[p].status)); |
} |
/* This is not efficient but very fair :-) |
The need of all this stuff is because if a task execute a long time |
due to (shadow!) priority inheritance, then the task shall go to the |
tail of the queue many times... */ |
static PID RRSOFT_level_scheduler(LEVEL l) |
static PID RRSOFT_public_scheduler(LEVEL l) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
211,7 → 136,7 |
PID p; |
for (;;) { |
p = qq_queryfirst(&lev->ready); |
p = iq_query_first(&lev->ready); |
if (p == -1) |
return p; |
//{kern_printf("(s%d)",p); return p;} |
219,8 → 144,8 |
// kern_printf("(p=%d l=%d avail=%d wcet =%d)\n",p,l,proc_table[p].avail_time, proc_table[p].wcet); |
if (proc_table[p].avail_time <= 0) { |
proc_table[p].avail_time += proc_table[p].wcet; |
qq_extract(p,&lev->ready); |
qq_insertlast(p,&lev->ready); |
iq_extract(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
else |
//{kern_printf("(s%d)",p); return p;} |
229,17 → 154,8 |
} |
} |
static int RRSOFT_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int RRSOFT_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
/* the RRSOFT level always guarantee... the function is defined because |
there can be an aperiodic server at a level with less priority than |
the RRSOFT that need guarantee (e.g., a TBS server) */ |
return 1; |
} |
static int RRSOFT_task_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
// kern_printf("create %d mod %d\n",p,m->pclass); |
247,6 → 163,11 |
the only thing to set remains the capacity stuffs that are set |
to the values passed in the model... */ |
if ( !(m->pclass==NRT_PCLASS && lev->models & RRSOFT_ONLY_NRT ) ) return -1; |
if ( !(m->pclass==SOFT_PCLASS && lev->models & RRSOFT_ONLY_SOFT) ) return -1; |
if ( !(m->pclass==HARD_PCLASS && lev->models & RRSOFT_ONLY_HARD) ) return -1; |
if (m->level != 0 && m->level != l) return -1; |
/* I used the wcet field because using wcet can account if a task |
consume more than the timeslice... */ |
310,49 → 231,17 |
return 0; /* OK */ |
} |
static void RRSOFT_task_detach(LEVEL l, PID p) |
static void RRSOFT_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the RRSOFT level doesn't introduce any new field in the TASK_MODEL |
so, all detach stuffs are done by the task_create |
The task state is set at FREE by the general task_create */ |
} |
static int RRSOFT_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void RRSOFT_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
//static int p2count=0; |
/* the task state is set EXE by the scheduler() |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
qq_extract(p, &lev->ready); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds],&schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
iq_extract(p, &lev->ready); |
} |
static void RRSOFT_task_epilogue(LEVEL l, PID p) |
static void RRSOFT_public_epilogue(LEVEL l, PID p) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
360,16 → 249,16 |
qqueue position */ |
if (proc_table[p].avail_time <= 0) { |
proc_table[p].avail_time += proc_table[p].wcet; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
else |
/* curr is >0, so the running task have to run for another cuRRSOFT usec */ |
qq_insertfirst(p,&lev->ready); |
iq_insertfirst(p,&lev->ready); |
proc_table[p].status = RRSOFT_READY; |
} |
static void RRSOFT_task_activate(LEVEL l, PID p) |
static void RRSOFT_public_activate(LEVEL l, PID p) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
381,18 → 270,15 |
return; |
} |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
/* Insert task in the coRRSOFTect position */ |
/* Insert task in the correct position */ |
proc_table[p].status = RRSOFT_READY; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
/* Set the reactivation timer */ |
if (lev->periodic[p]) |
{ |
TIMESPEC_ASSIGN(&lev->reactivation_time[p], &proc_table[p].request_time); |
kern_gettime(&lev->reactivation_time[p]); |
ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]); |
// TIMESPEC_ASSIGN(&lev->reactivation_time[p], &lev->cbs_dline[p]); |
lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p], |
RRSOFT_timer_reactivate, |
(void *)p); |
399,19 → 285,19 |
} |
} |
static void RRSOFT_task_insert(LEVEL l, PID p) |
static void RRSOFT_public_unblock(LEVEL l, PID p) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
/* Similar to RRSOFT_task_activate, but we don't check in what state |
the task is and we don't set the request_time */ |
the task is */ |
/* Insert task in the coRRSOFTect position */ |
proc_table[p].status = RRSOFT_READY; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
static void RRSOFT_task_extract(LEVEL l, PID p) |
static void RRSOFT_public_block(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extract it from the ready queue at the dispatch time. |
423,23 → 309,27 |
*/ |
} |
static void RRSOFT_task_endcycle(LEVEL l, PID p) |
static int RRSOFT_public_message(LEVEL l, PID p, void *m) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
if (lev->nact[p] > 0) { |
/* continue!!!! */ |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
lev->nact[p]--; |
// qq_insertlast(p,&lev->ready); |
qq_insertfirst(p,&lev->ready); |
iq_insertfirst(p,&lev->ready); |
proc_table[p].status = RRSOFT_READY; |
} |
else |
proc_table[p].status = RRSOFT_IDLE; |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
return 0; |
} |
static void RRSOFT_task_end(LEVEL l, PID p) |
static void RRSOFT_public_end(LEVEL l, PID p) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
447,83 → 337,15 |
/* we delete the reactivation timer */ |
if (lev->periodic[p]) { |
event_delete(lev->reactivation_timer[p]); |
kern_event_delete(lev->reactivation_timer[p]); |
lev->reactivation_timer[p] = -1; |
} |
/* then, we insert the task in the free queue */ |
proc_table[p].status = FREE; |
q_insert(p,&freedesc); |
iq_insertlast(p,&freedesc); |
} |
static void RRSOFT_task_sleep(LEVEL l, PID p) |
{ |
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
if (lev->nact[p] >= 0) lev->nact[p] = 0; |
/* we delete the reactivation timer */ |
if (lev->periodic[p]) { |
event_delete(lev->reactivation_timer[p]); |
lev->reactivation_timer[p] = -1; |
} |
proc_table[p].status = SLEEP; |
} |
static void RRSOFT_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
// RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]); |
struct timespec wakeuptime; |
/* equal to RRSOFT_task_endcycle */ |
proc_table[p].status = RRSOFT_DELAY; |
/* we need to delete this event if we kill the task while it is sleeping */ |
ll_gettime(TIME_EXACT,&wakeuptime); |
ADDUSEC2TIMESPEC(usdelay,&wakeuptime); |
proc_table[p].delay_timer = kern_event_post(&wakeuptime, |
RRSOFT_timer_delay, |
(void *)p); |
} |
static int RRSOFT_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void RRSOFT_guest_detach(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_epilogue(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_activate(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_insert(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_extract(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_end(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void RRSOFT_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
/* Registration functions */ |
/*+ This init function install the "main" task +*/ |
551,7 → 373,7 |
if (p == NIL) |
printk("\nPanic!!! can't create main task...\n"); |
RRSOFT_task_activate(lev,p); |
RRSOFT_public_activate(lev,p); |
} |
559,7 → 381,7 |
TIME slice the slice for the Round Robin queue |
int createmain 1 if the level creates the main task 0 otherwise |
struct multiboot_info *mb used if createmain specified +*/ |
void RRSOFT_register_level(TIME slice, |
LEVEL RRSOFT_register_level(TIME slice, |
int createmain, |
struct multiboot_info *mb, |
BYTE models) |
571,52 → 393,23 |
printk("RRSOFT_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(RRSOFT_level_des)); |
/* alloc the space needed for the RRSOFT_level_des */ |
lev = (RRSOFT_level_des *)kern_alloc(sizeof(RRSOFT_level_des)); |
lev = (RRSOFT_level_des *)level_table[l]; |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, RRSOFT_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = RRSOFT_LEVEL_CODE; |
lev->l.level_version = RRSOFT_LEVEL_VERSION; |
lev->l.public_scheduler = RRSOFT_public_scheduler; |
lev->l.public_create = RRSOFT_public_create; |
lev->l.public_end = RRSOFT_public_end; |
lev->l.public_dispatch = RRSOFT_public_dispatch; |
lev->l.public_epilogue = RRSOFT_public_epilogue; |
lev->l.public_activate = RRSOFT_public_activate; |
lev->l.public_unblock = RRSOFT_public_unblock; |
lev->l.public_block = RRSOFT_public_block; |
lev->l.public_message = RRSOFT_public_message; |
lev->l.level_accept_task_model = RRSOFT_level_accept_task_model; |
lev->l.level_accept_guest_model = RRSOFT_level_accept_guest_model; |
lev->l.level_status = RRSOFT_level_status; |
lev->l.level_scheduler = RRSOFT_level_scheduler; |
lev->l.level_guarantee = RRSOFT_level_guarantee; |
lev->l.task_create = RRSOFT_task_create; |
lev->l.task_detach = RRSOFT_task_detach; |
lev->l.task_eligible = RRSOFT_task_eligible; |
lev->l.task_dispatch = RRSOFT_task_dispatch; |
lev->l.task_epilogue = RRSOFT_task_epilogue; |
lev->l.task_activate = RRSOFT_task_activate; |
lev->l.task_insert = RRSOFT_task_insert; |
lev->l.task_extract = RRSOFT_task_extract; |
lev->l.task_endcycle = RRSOFT_task_endcycle; |
lev->l.task_end = RRSOFT_task_end; |
lev->l.task_sleep = RRSOFT_task_sleep; |
lev->l.task_delay = RRSOFT_task_delay; |
lev->l.guest_create = RRSOFT_guest_create; |
lev->l.guest_detach = RRSOFT_guest_detach; |
lev->l.guest_dispatch = RRSOFT_guest_dispatch; |
lev->l.guest_epilogue = RRSOFT_guest_epilogue; |
lev->l.guest_activate = RRSOFT_guest_activate; |
lev->l.guest_insert = RRSOFT_guest_insert; |
lev->l.guest_extract = RRSOFT_guest_extract; |
lev->l.guest_endcycle = RRSOFT_guest_endcycle; |
lev->l.guest_end = RRSOFT_guest_end; |
lev->l.guest_sleep = RRSOFT_guest_sleep; |
lev->l.guest_delay = RRSOFT_guest_delay; |
/* fill the RRSOFT descriptor part */ |
for (i = 0; i < MAX_PROC; i++) { |
lev->nact[i] = -1; |
626,7 → 419,7 |
lev->period[i] = 0; |
} |
qq_init(&lev->ready); |
iq_init(&lev->ready, &freedesc, 0); |
if (slice < RRSOFT_MINIMUM_SLICE) slice = RRSOFT_MINIMUM_SLICE; |
if (slice > RRSOFT_MAXIMUM_SLICE) slice = RRSOFT_MAXIMUM_SLICE; |
638,6 → 431,8 |
if (createmain) |
sys_atrunlevel(RRSOFT_call_main,(void *) l, RUNLEVEL_INIT); |
return l; |
} |
/shark/tags/rel_0_3/kernel/modules/ps.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: ps.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: ps.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the aperiodic server PS (Polling Server) |
103,6 → 103,7 |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
/*+ Status used in the level +*/ |
#define PS_WAIT APER_STATUS_BASE /*+ waiting the service +*/ |
122,7 → 123,7 |
int Cs; /*+ server capacity +*/ |
int availCs; /*+ server avail time +*/ |
QQUEUE wait; /*+ the wait queue of the PS +*/ |
IQUEUE wait; /*+ the wait queue of the PS +*/ |
PID activated; /*+ the task inserted in another queue +*/ |
int flags; /*+ the init flags... +*/ |
145,8 → 146,7 |
m = lev->scheduling_level; |
job_task_default_model(j,lev->lastdline); |
job_task_def_period(j,lev->period); |
level_table[m]->guest_create(m,p,(TASK_MODEL *)&j); |
level_table[m]->guest_activate(m,p); |
level_table[m]->private_insert(m,p,(TASK_MODEL *)&j); |
// kern_printf("(%d %d)",lev->lastdline.tv_sec,lev->lastdline.tv_nsec); |
} |
167,8 → 167,8 |
was not any other task to be put in the ready queue |
... we are now activating the next task */ |
if (lev->availCs > 0 && lev->activated == NIL) { |
if (qq_queryfirst(&lev->wait) != NIL) { |
lev->activated = qq_getfirst(&lev->wait); |
if (iq_query_first(&lev->wait) != NIL) { |
lev->activated = iq_getfirst(&lev->wait); |
PS_activation(lev); |
event_need_reschedule(); |
} |
180,80 → 180,8 |
// kern_printf("!"); |
} |
static char *PS_status_to_a(WORD status) |
static PID PS_public_schedulerbackground(LEVEL l) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case PS_WAIT : return "PS_Wait"; |
default : return "PS_Unknown"; |
} |
} |
static int PS_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l) ) { |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; |
if (s->periodicity == APERIODIC) |
return 0; |
} |
return -1; |
} |
static int PS_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
return -1; |
} |
static char *onoff(int i) |
{ |
if (i) |
return "On "; |
else |
return "Off"; |
} |
static void PS_level_status(LEVEL l) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
PID p = qq_queryfirst(&lev->wait); |
kern_printf("On-line guarantee : %s\n", |
onoff(lev->flags & PS_ENABLE_GUARANTEE_EDF || |
lev->flags & PS_ENABLE_GUARANTEE_RM )); |
kern_printf("Used Bandwidth : %u/%u\n", |
lev->U, MAX_BANDWIDTH); |
if (lev->activated != -1) |
kern_printf("Activated: Pid: %2d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n", |
lev->activated, |
proc_table[lev->activated].name, |
proc_table[lev->activated].timespec_priority.tv_sec, |
proc_table[lev->activated].timespec_priority.tv_nsec, |
lev->nact[lev->activated], |
PS_status_to_a(proc_table[lev->activated].status)); |
while (p != NIL) { |
kern_printf("Pid: %2d Name: %10s Stat: %s\n", |
p, |
proc_table[p].name, |
PS_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
} |
} |
static PID PS_level_scheduler(LEVEL l) |
{ |
/* the PS don't schedule anything... |
it's an EDF level or similar that do it! */ |
return NIL; |
} |
static PID PS_level_schedulerbackground(LEVEL l) |
{ |
/* the PS catch the background time to exec aperiodic activities */ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
262,11 → 190,11 |
if (lev->flags & PS_BACKGROUND_BLOCK) |
return NIL; |
else |
return qq_queryfirst(&lev->wait); |
return iq_query_first(&lev->wait); |
} |
/* The on-line guarantee is enabled only if the appropriate flag is set... */ |
static int PS_level_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth) |
static int PS_public_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
278,7 → 206,7 |
return 0; |
} |
static int PS_level_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth) |
static int PS_public_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
290,13 → 218,17 |
return 0; |
} |
static int PS_task_create(LEVEL l, PID p, TASK_MODEL *m) |
static int PS_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
SOFT_TASK_MODEL *s; |
/* if the PS_task_create is called, then the pclass must be a |
valid pclass. */ |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; |
if (m->pclass != SOFT_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
s = (SOFT_TASK_MODEL *)m; |
if (s->periodicity != APERIODIC) return -1; |
s = (SOFT_TASK_MODEL *)m; |
if (s->arrivals == SAVE_ARRIVALS) |
lev->nact[p] = 0; |
306,26 → 238,8 |
return 0; /* OK, also if the task cannot be guaranteed... */ |
} |
static void PS_task_detach(LEVEL l, PID p) |
static void PS_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the PS level doesn't introduce any dinamic allocated new field. */ |
} |
static int PS_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void PS_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
struct timespec ty; |
335,13 → 249,13 |
to exe before calling task_dispatch. we have to check |
lev->activated != p instead */ |
if (lev->activated != p) { |
qq_extract(p, &lev->wait); |
iq_extract(p, &lev->wait); |
//kern_printf("#%d#",p); |
} |
else { |
//if (nostop) kern_printf("(gd status=%d)",proc_table[p].status); |
level_table[ lev->scheduling_level ]-> |
guest_dispatch(lev->scheduling_level,p,nostop); |
private_dispatch(lev->scheduling_level,p,nostop); |
} |
/* set the capacity timer */ |
352,19 → 266,9 |
} |
// kern_printf("(disp %d %d)",ty.tv_sec, ty.tv_nsec); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
} |
static void PS_task_epilogue(LEVEL l, PID p) |
static void PS_public_epilogue(LEVEL l, PID p) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
struct timespec ty; |
396,8 → 300,8 |
task point the shadow to it!!!*/ |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
qq_insertfirst(p, &lev->wait); |
private_extract(lev->scheduling_level,p); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = PS_WAIT; |
lev->activated = NIL; |
} |
406,14 → 310,14 |
wait queue by calling the guest_epilogue... */ |
if (lev->activated == p) {//kern_printf("Û1"); |
level_table[ lev->scheduling_level ]-> |
guest_epilogue(lev->scheduling_level,p); |
private_epilogue(lev->scheduling_level,p); |
} else { //kern_printf("Û2"); |
qq_insertfirst(p, &lev->wait); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = PS_WAIT; |
} |
} |
static void PS_task_activate(LEVEL l, PID p) |
static void PS_public_activate(LEVEL l, PID p) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
422,7 → 326,6 |
lev->nact[p]++; |
} |
else if (proc_table[p].status == SLEEP) { |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
if (lev->activated == NIL && lev->availCs > 0) { |
lev->activated = p; |
429,7 → 332,7 |
PS_activation(lev); |
} |
else { |
qq_insertlast(p, &lev->wait); |
iq_insertlast(p, &lev->wait); |
proc_table[p].status = PS_WAIT; |
} |
} |
439,7 → 342,7 |
} |
static void PS_task_insert(LEVEL l, PID p) |
static void PS_public_unblock(LEVEL l, PID p) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
449,11 → 352,11 |
/* when we reinsert the task into the system, the server capacity |
is always 0 because nobody executes with the PS before... */ |
qq_insertfirst(p, &lev->wait); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = PS_WAIT; |
} |
static void PS_task_extract(LEVEL l, PID p) |
static void PS_public_block(LEVEL l, PID p) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
464,10 → 367,10 |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
} |
static void PS_task_endcycle(LEVEL l, PID p) |
static int PS_public_message(LEVEL l, PID p, void *m) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
struct timespec ty; |
484,56 → 387,32 |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
else |
qq_extract(p, &lev->wait); |
iq_extract(p, &lev->wait); |
if (lev->nact[p] > 0) |
{ |
lev->nact[p]--; |
qq_insertlast(p, &lev->wait); |
iq_insertlast(p, &lev->wait); |
proc_table[p].status = PS_WAIT; |
} |
else |
proc_table[p].status = SLEEP; |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated == NIL) |
lev->availCs = 0; /* see note (*) at the begin of the file */ |
else |
PS_activation(lev); |
} |
static void PS_task_end(LEVEL l, PID p) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
struct timespec ty; |
TIME tx; |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
/* update the server capacity */ |
if (lev->flags & PS_BACKGROUND) |
lev->flags &= ~PS_BACKGROUND; |
else { |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
lev->availCs -= tx; |
} |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
proc_table[p].status = FREE; |
q_insertfirst(p,&freedesc); |
lev->activated = qq_getfirst(&lev->wait); |
if (lev->activated == NIL) |
lev->availCs = 0; /* see note (*) at the begin of the file */ |
else |
PS_activation(lev); |
return 0; |
} |
static void PS_task_sleep(LEVEL l, PID p) |
static void PS_public_end(LEVEL l, PID p) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
struct timespec ty; |
548,80 → 427,20 |
lev->availCs -= tx; |
} |
if (lev->nact[p] >= 0) lev->nact[p] = 0; |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
else |
qq_extract(p, &lev->wait); |
private_extract(lev->scheduling_level,p); |
proc_table[p].status = SLEEP; |
proc_table[p].status = FREE; |
iq_insertfirst(p,&freedesc); |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated == NIL) |
lev->availCs = 0; /* see note (*) at the begin of the file */ |
else |
PS_activation(lev); |
} |
static void PS_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
struct timespec ty; |
TIME tx; |
/* update the server capacity */ |
if (lev->flags & PS_BACKGROUND) |
lev->flags &= ~PS_BACKGROUND; |
else { |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
lev->availCs -= tx; |
} |
/* I hope no delay when owning a mutex... */ |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_delay(lev->scheduling_level,p,usdelay); |
} |
/* Guest-task entry points.
   The PS (Polling Server) module cannot host guest tasks, so every
   guest_* hook simply raises XUNVALID_GUEST on the running task;
   reaching any of these is a programming error in the caller. */
static int PS_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; }
static void PS_guest_detach(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void PS_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void PS_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void PS_guest_activate(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void PS_guest_insert(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void PS_guest_extract(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void PS_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void PS_guest_end(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void PS_guest_sleep(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void PS_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
/* Registration functions */ |
631,7 → 450,7 |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[(LEVEL)l]); |
ll_gettime(TIME_EXACT,&lev->lastdline); |
kern_gettime(&lev->lastdline); |
ADDUSEC2TIMESPEC(lev->period, &lev->lastdline); |
kern_event_post(&lev->lastdline, PS_deadline_timer, l); |
641,7 → 460,7 |
/*+ Registration function: |
int flags the init flags ... see PS.h +*/ |
void PS_register_level(int flags, LEVEL master, int Cs, int per) |
LEVEL PS_register_level(int flags, LEVEL master, int Cs, int per) |
{ |
LEVEL l; /* the level that we register */ |
PS_level_des *lev; /* for readableness only */ |
650,64 → 469,33 |
printk("PS_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(PS_level_des)); |
printk(" alloco descrittore %d %d\n",l,(int)sizeof(PS_level_des)); |
lev = (PS_level_des *)level_table[l]; |
/* alloc the space needed for the PS_level_des */ |
lev = (PS_level_des *)kern_alloc(sizeof(PS_level_des)); |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, PS_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = PS_LEVEL_CODE; |
lev->l.level_version = PS_LEVEL_VERSION; |
lev->l.level_accept_task_model = PS_level_accept_task_model; |
lev->l.level_accept_guest_model = PS_level_accept_guest_model; |
lev->l.level_status = PS_level_status; |
if (flags & PS_ENABLE_BACKGROUND) |
lev->l.level_scheduler = PS_level_schedulerbackground; |
else |
lev->l.level_scheduler = PS_level_scheduler; |
lev->l.public_scheduler = PS_public_schedulerbackground; |
if (flags & PS_ENABLE_GUARANTEE_EDF) |
lev->l.level_guarantee = PS_level_guaranteeEDF; |
lev->l.public_guarantee = PS_public_guaranteeEDF; |
else if (flags & PS_ENABLE_GUARANTEE_RM) |
lev->l.level_guarantee = PS_level_guaranteeRM; |
lev->l.public_guarantee = PS_public_guaranteeRM; |
else |
lev->l.level_guarantee = NULL; |
lev->l.public_guarantee = NULL; |
lev->l.task_create = PS_task_create; |
lev->l.task_detach = PS_task_detach; |
lev->l.task_eligible = PS_task_eligible; |
lev->l.task_dispatch = PS_task_dispatch; |
lev->l.task_epilogue = PS_task_epilogue; |
lev->l.task_activate = PS_task_activate; |
lev->l.task_insert = PS_task_insert; |
lev->l.task_extract = PS_task_extract; |
lev->l.task_endcycle = PS_task_endcycle; |
lev->l.task_end = PS_task_end; |
lev->l.task_sleep = PS_task_sleep; |
lev->l.task_delay = PS_task_delay; |
lev->l.public_create = PS_public_create; |
lev->l.public_end = PS_public_end; |
lev->l.public_dispatch = PS_public_dispatch; |
lev->l.public_epilogue = PS_public_epilogue; |
lev->l.public_activate = PS_public_activate; |
lev->l.public_unblock = PS_public_unblock; |
lev->l.public_block = PS_public_block; |
lev->l.public_message = PS_public_message; |
lev->l.guest_create = PS_guest_create; |
lev->l.guest_detach = PS_guest_detach; |
lev->l.guest_dispatch = PS_guest_dispatch; |
lev->l.guest_epilogue = PS_guest_epilogue; |
lev->l.guest_activate = PS_guest_activate; |
lev->l.guest_insert = PS_guest_insert; |
lev->l.guest_extract = PS_guest_extract; |
lev->l.guest_endcycle = PS_guest_endcycle; |
lev->l.guest_end = PS_guest_end; |
lev->l.guest_sleep = PS_guest_sleep; |
lev->l.guest_delay = PS_guest_delay; |
/* fill the PS descriptor part */ |
for (i=0; i<MAX_PROC; i++) |
718,7 → 506,7 |
lev->period = per; |
qq_init(&lev->wait); |
iq_init(&lev->wait, &freedesc, 0); |
lev->activated = NIL; |
lev->U = (MAX_BANDWIDTH / per) * Cs; |
728,15 → 516,14 |
lev->flags = flags & 0x07; |
sys_atrunlevel(PS_dline_install,(void *) l, RUNLEVEL_INIT); |
return l; |
} |
bandwidth_t PS_usedbandwidth(LEVEL l) |
{ |
PS_level_des *lev = (PS_level_des *)(level_table[l]); |
if (lev->l.level_code == PS_LEVEL_CODE && |
lev->l.level_version == PS_LEVEL_VERSION) |
return lev->U; |
else |
return 0; |
return lev->U; |
} |
/shark/tags/rel_0_3/kernel/modules/rr.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: rr.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: rr.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the scheduling module RR (Round Robin) |
60,16 → 60,20 |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
//#define RRDEBUG |
#define rr_printf kern_printf |
/*+ Status used in the level +*/ |
#define RR_READY MODULE_STATUS_BASE |
#define RR_DELAY MODULE_STATUS_BASE+1 |
/*+ the level redefinition for the Round Robin level +*/ |
typedef struct { |
level_des l; /*+ the standard level descriptor +*/ |
QQUEUE ready; /*+ the ready queue +*/ |
IQUEUE ready; /*+ the ready queue +*/ |
int slice; /*+ the level's time slice +*/ |
77,112 → 81,57 |
the main task +*/ |
} RR_level_des; |
/* Map a task status code to a printable string (used by the status
   dump below).  Codes below MODULE_STATUS_BASE are generic kernel
   statuses and are delegated to the global status_to_a(); the two
   RR-private codes get their own labels. */
static char *RR_status_to_a(WORD status)
{
if (status < MODULE_STATUS_BASE)
return status_to_a(status);
switch (status) {
case RR_READY: return "RR_Ready";
case RR_DELAY: return "RR_Delay";
default : return "RR_Unknown";
}
}
/*+ Timer event handler fired when a delayed task's wake-up time
    expires: the task (passed as the opaque event argument) is marked
    ready again, re-queued at the tail of its level's ready queue,
    and a reschedule is requested. +*/
static void RR_timer_delay(void *par)
{
PID p = (PID) par;
RR_level_des *lev;
lev = (RR_level_des *)level_table[proc_table[p].task_level];
proc_table[p].status = RR_READY;
qq_insertlast(p,&lev->ready);
/* the event has fired, so the stored handle is stale: clear it */
proc_table[p].delay_timer = NIL; /* Paranoia */
// kern_printf(" DELAY TIMER %d ", p);
event_need_reschedule();
}
/* Model filter: the RR level accepts only NRT (non-real-time) task
   models, addressed either generically (plain NRT_PCLASS) or
   explicitly to this level.  Returns 0 on accept, -1 on reject. */
static int RR_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
if (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l))
return 0;
else
return -1;
}
/* The RR level does not host guest tasks: unconditionally reject. */
static int RR_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
return -1;
}
/* Debug dump: print the level's time slice, then every task in the
   ready queue, then every other task owned by this level that is
   neither ready nor free (e.g. executing, sleeping or delayed). */
static void RR_level_status(LEVEL l)
{
RR_level_des *lev = (RR_level_des *)(level_table[l]);
PID p = qq_queryfirst(&lev->ready);
kern_printf("Slice: %d \n", lev->slice);
/* walk the ready queue via the proc_table[].next links */
while (p != NIL) {
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name,
RR_status_to_a(proc_table[p].status));
p = proc_table[p].next;
}
/* then scan the whole process table for this level's non-ready,
   non-free tasks, which are not reachable from the ready queue */
for (p=0; p<MAX_PROC; p++)
if (proc_table[p].task_level == l && proc_table[p].status != RR_READY
&& proc_table[p].status != FREE )
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name,
RR_status_to_a(proc_table[p].status));
}
/* This is not efficient but very fair :-) |
The need of all this stuff is because if a task execute a long time |
due to (shadow!) priority inheritance, then the task shall go to the |
tail of the queue many times... */ |
static PID RR_level_scheduler(LEVEL l) |
static PID RR_public_scheduler(LEVEL l) |
{ |
RR_level_des *lev = (RR_level_des *)(level_table[l]); |
PID p; |
#ifdef RRDEBUG |
rr_printf("(RRs",p); |
#endif |
for (;;) { |
p = qq_queryfirst(&lev->ready); |
if (p == -1) |
p = iq_query_first(&lev->ready); |
if (p == -1) { |
#ifdef RRDEBUG |
rr_printf(" %d)",p); |
#endif |
return p; |
} |
if (proc_table[p].avail_time <= 0) { |
proc_table[p].avail_time += proc_table[p].wcet; |
qq_extract(p,&lev->ready); |
qq_insertlast(p,&lev->ready); |
iq_extract(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
else |
else { |
#ifdef RRDEBUG |
rr_printf(" %d)",p); |
#endif |
return p; |
} |
} |
} |
static int RR_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int RR_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
/* the RR level always guarantee... the function is defined because |
there can be an aperiodic server at a level with less priority than |
the RR that need guarantee (e.g., a TBS server) */ |
return 1; |
} |
RR_level_des *lev = (RR_level_des *)(level_table[l]); |
NRT_TASK_MODEL *nrt; |
#ifdef RRDEBUG |
rr_printf("(create %d!!!!)",p); |
#endif |
static int RR_task_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
RR_level_des *lev = (RR_level_des *)(level_table[l]); |
NRT_TASK_MODEL *nrt = (NRT_TASK_MODEL *)m; |
if (m->pclass != NRT_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
nrt = (NRT_TASK_MODEL *)m; |
/* the task state is set at SLEEP by the general task_create |
the only thing to set remains the capacity stuffs that are set |
to the values passed in the model... */ |
200,53 → 149,27 |
} |
proc_table[p].control |= CONTROL_CAP; |
#ifdef RRDEBUG |
rr_printf("(c%d av%d w%d )",p,proc_table[p].avail_time,proc_table[p].wcet); |
#endif |
return 0; /* OK */ |
} |
static void RR_task_detach(LEVEL l, PID p) |
static void RR_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the RR level doesn't introduce any new field in the TASK_MODEL |
so, all detach stuffs are done by the task_create |
The task state is set at FREE by the general task_create */ |
} |
/* Eligibility hook: an RR task chosen by the scheduler can always
   run, so this is a constant accept (0). */
static int RR_task_eligible(LEVEL l, PID p)
{
return 0; /* if the task p is chosen, it is always eligible */
}
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void RR_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
RR_level_des *lev = (RR_level_des *)(level_table[l]); |
/* the task state is set EXE by the scheduler() |
we extract the task from the ready queue |
NB: we can't assume that p is the first task in the queue!!! */ |
qq_extract(p, &lev->ready); |
iq_extract(p, &lev->ready); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds],&schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
// if (nostop) kern_printf("Û"); |
// kern_printf("(RR d %d)",nostop); |
#ifdef RRDEBUG |
rr_printf("(dis%d)",p); |
#endif |
} |
static void RR_task_epilogue(LEVEL l, PID p) |
static void RR_public_epilogue(LEVEL l, PID p) |
{ |
RR_level_des *lev = (RR_level_des *)(level_table[l]); |
254,16 → 177,20 |
qqueue position */ |
if (proc_table[p].avail_time <= 0) { |
proc_table[p].avail_time += proc_table[p].wcet; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
} |
else |
/* curr is >0, so the running task have to run for another curr usec */ |
qq_insertfirst(p,&lev->ready); |
iq_insertfirst(p,&lev->ready); |
proc_table[p].status = RR_READY; |
#ifdef RRDEBUG |
rr_printf("(epi%d)",p); |
#endif |
} |
static void RR_task_activate(LEVEL l, PID p) |
static void RR_public_activate(LEVEL l, PID p) |
{ |
RR_level_des *lev = (RR_level_des *)(level_table[l]); |
272,26 → 199,33 |
if (proc_table[p].status != SLEEP) |
return; |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
/* Insert task in the correct position */ |
proc_table[p].status = RR_READY; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
#ifdef RRDEBUG |
rr_printf("(act%d)",p); |
#endif |
} |
static void RR_task_insert(LEVEL l, PID p) |
static void RR_public_unblock(LEVEL l, PID p) |
{ |
RR_level_des *lev = (RR_level_des *)(level_table[l]); |
/* Similar to RR_task_activate, but we don't check in what state |
the task is and we don't set the request_time */ |
/* Similar to RR_task_activate, |
but we don't check in what state the task is */ |
/* Insert task in the correct position */ |
proc_table[p].status = RR_READY; |
qq_insertlast(p,&lev->ready); |
iq_insertlast(p,&lev->ready); |
#ifdef RRDEBUG |
rr_printf("(ubl%d)",p); |
#endif |
} |
static void RR_task_extract(LEVEL l, PID p) |
static void RR_public_block(LEVEL l, PID p) |
{ |
/* Extract the running task from the level |
. we have already extract it from the ready queue at the dispatch time. |
301,84 → 235,36 |
So, we do nothing!!! |
*/ |
#ifdef RRDEBUG |
rr_printf("(bl%d)",p); |
#endif |
} |
static void RR_task_endcycle(LEVEL l, PID p) |
static int RR_public_message(LEVEL l, PID p, void *m) |
{ |
// RR_level_des *lev = (RR_level_des *)(level_table[l]); |
proc_table[p].status = SLEEP; |
/* this function is equal to the RR_task_extract, except that |
the task fall asleep... */ |
proc_table[p].status = SLEEP; |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
#ifdef RRDEBUG |
rr_printf("(msg%d)",p); |
#endif |
return 0; |
} |
static void RR_task_end(LEVEL l, PID p) |
static void RR_public_end(LEVEL l, PID p) |
{ |
// RR_level_des *lev = (RR_level_des *)(level_table[l]); |
/* we insert the task in the free queue */ |
proc_table[p].status = FREE; |
q_insert(p,&freedesc); |
} |
iq_insertlast(p,&freedesc); |
static void RR_task_sleep(LEVEL l, PID p) |
{ |
proc_table[p].status = SLEEP; |
#ifdef RRDEBUG |
rr_printf("(end%d)",p); |
#endif |
} |
/* Level hook: suspend task p for usdelay microseconds.
   The task is marked RR_DELAY (it was already extracted from the
   ready queue at dispatch time, so no queue manipulation is needed)
   and a one-shot kernel event is posted at now+usdelay that will call
   RR_timer_delay() to wake it up again. */
static void RR_task_delay(LEVEL l, PID p, TIME usdelay)
{
// RR_level_des *lev = (RR_level_des *)(level_table[l]);
struct timespec wakeuptime;
/* equal to RR_task_endcycle */
proc_table[p].status = RR_DELAY;
/* we need to delete this event if we kill the task while it is sleeping,
   so the event handle is kept in proc_table[p].delay_timer */
ll_gettime(TIME_EXACT,&wakeuptime);
ADDUSEC2TIMESPEC(usdelay,&wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
RR_timer_delay,
(void *)p);
}
/* Guest-task entry points.
   The RR module cannot host guest tasks (see
   RR_level_accept_guest_model above), so every guest_* hook raises
   XUNVALID_GUEST on the running task; reaching any of these is a
   programming error in the caller. */
static int RR_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; }
static void RR_guest_detach(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void RR_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void RR_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void RR_guest_activate(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void RR_guest_insert(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void RR_guest_extract(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void RR_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void RR_guest_end(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void RR_guest_sleep(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
static void RR_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
/* Registration functions */ |
/*+ This init function install the "main" task +*/ |
404,9 → 290,13 |
p = task_create("Main", __init__, (TASK_MODEL *)&m, NULL); |
if (p == NIL) |
kern_printf("\nPanic!!! can't create main task... errno =%d\n",errno); |
printk(KERN_EMERG "Panic!!! can't create main task... errno =%d\n",errno); |
RR_task_activate(lev,p); |
RR_public_activate(lev,p); |
#ifdef RRDEBUG |
rr_printf("(main created %d)",p); |
#endif |
} |
414,7 → 304,7 |
TIME slice the slice for the Round Robin queue |
int createmain 1 if the level creates the main task 0 otherwise |
struct multiboot_info *mb used if createmain specified +*/ |
void RR_register_level(TIME slice, |
LEVEL RR_register_level(TIME slice, |
int createmain, |
struct multiboot_info *mb) |
{ |
424,54 → 314,25 |
printk("RR_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(RR_level_des)); |
/* alloc the space needed for the RR_level_des */ |
lev = (RR_level_des *)kern_alloc(sizeof(RR_level_des)); |
lev = (RR_level_des *)level_table[l]; |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, RR_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = RR_LEVEL_CODE; |
lev->l.level_version = RR_LEVEL_VERSION; |
lev->l.public_scheduler = RR_public_scheduler; |
lev->l.public_create = RR_public_create; |
lev->l.public_end = RR_public_end; |
lev->l.public_dispatch = RR_public_dispatch; |
lev->l.public_epilogue = RR_public_epilogue; |
lev->l.public_activate = RR_public_activate; |
lev->l.public_unblock = RR_public_unblock; |
lev->l.public_block = RR_public_block; |
lev->l.public_message = RR_public_message; |
lev->l.level_accept_task_model = RR_level_accept_task_model; |
lev->l.level_accept_guest_model = RR_level_accept_guest_model; |
lev->l.level_status = RR_level_status; |
lev->l.level_scheduler = RR_level_scheduler; |
lev->l.level_guarantee = RR_level_guarantee; |
lev->l.task_create = RR_task_create; |
lev->l.task_detach = RR_task_detach; |
lev->l.task_eligible = RR_task_eligible; |
lev->l.task_dispatch = RR_task_dispatch; |
lev->l.task_epilogue = RR_task_epilogue; |
lev->l.task_activate = RR_task_activate; |
lev->l.task_insert = RR_task_insert; |
lev->l.task_extract = RR_task_extract; |
lev->l.task_endcycle = RR_task_endcycle; |
lev->l.task_end = RR_task_end; |
lev->l.task_sleep = RR_task_sleep; |
lev->l.task_delay = RR_task_delay; |
lev->l.guest_create = RR_guest_create; |
lev->l.guest_detach = RR_guest_detach; |
lev->l.guest_dispatch = RR_guest_dispatch; |
lev->l.guest_epilogue = RR_guest_epilogue; |
lev->l.guest_activate = RR_guest_activate; |
lev->l.guest_insert = RR_guest_insert; |
lev->l.guest_extract = RR_guest_extract; |
lev->l.guest_endcycle = RR_guest_endcycle; |
lev->l.guest_end = RR_guest_end; |
lev->l.guest_sleep = RR_guest_sleep; |
lev->l.guest_delay = RR_guest_delay; |
/* fill the RR descriptor part */ |
qq_init(&lev->ready); |
iq_init(&lev->ready, &freedesc, 0); |
if (slice < RR_MINIMUM_SLICE) slice = RR_MINIMUM_SLICE; |
if (slice > RR_MAXIMUM_SLICE) slice = RR_MAXIMUM_SLICE; |
481,6 → 342,6 |
if (createmain) |
sys_atrunlevel(RR_call_main,(void *) l, RUNLEVEL_INIT); |
return l; |
} |
/shark/tags/rel_0_3/kernel/modules/sem.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: sem.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: sem.c,v 1.3 2003-01-07 17:07:51 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:51 $ |
------------ |
This file contains the Hartik 3.3.1 Semaphore functions |
79,7 → 79,7 |
char *name; /* a name, for named semaphores */ |
int index; /* an index for sem_open, containing the sem number */ |
int count; /* the semaphore counter */ |
QQUEUE blocked; /* the blocked processes queue */ |
IQUEUE blocked; /* the blocked processes queue */ |
int next; /* the semaphore queue */ |
BYTE used; /* 1 if the semaphore is used */ |
} sem_table[SEM_NSEMS_MAX]; |
91,7 → 91,7 |
int sem; /* the semaphore on whitch the process is blocked */ |
} sp_table[MAX_PROC]; |
static QUEUE free_sem; /* Queue of free sem */ |
static int free_sem; /* Queue of free sem */ |
112,10 → 112,10 |
task_testcancel */ |
/* extract the process from the semaphore queue... */ |
qq_extract(i,&sem_table[ sp_table[i].sem ].blocked); |
iq_extract(i,&sem_table[ sp_table[i].sem ].blocked); |
l = proc_table[i].task_level; |
level_table[l]->task_insert(l,i); |
level_table[l]->public_unblock(l,i); |
return 1; |
} |
134,7 → 134,7 |
sem_table[i].name = NULL; |
sem_table[i].index = i; |
sem_table[i].count = 0; |
qq_init(&sem_table[i].blocked); |
iq_init(&sem_table[i].blocked, &freedesc, 0); |
sem_table[i].next = i+1; |
sem_table[i].used = 0; |
} |
160,7 → 160,7 |
free_sem = sem_table[*sem].next; |
sem_table[*sem].name = NULL; |
sem_table[*sem].count = value; |
qq_init(&sem_table[*sem].blocked); |
iq_init(&sem_table[*sem].blocked, &freedesc, 0); |
sem_table[*sem].used = 1; |
} |
else { |
254,7 → 254,7 |
sem_table[sem].name = kern_alloc(strlen((char *)name)+1); |
strcpy(sem_table[sem].name, (char *)name); |
sem_table[sem].count = j; |
qq_init(&sem_table[sem].blocked); |
iq_init(&sem_table[sem].blocked, &freedesc, 0); |
sem_table[sem].used = 1; |
kern_sti(); |
return &sem_table[sem].index; |
350,25 → 350,14 |
if (s1->blocked.first != NIL || s1->count == 0) { |
/* We must block exec task */ |
LEVEL l; /* for readableness only */ |
TIME tx; /* a dummy TIME for timespec operations */ |
struct timespec ty; /* a dummy timespec for timespec operations */ |
/* tracer stuff */ |
trc_logevent(TRC_SEM_WAIT,s); |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
/* we insert the task in the semaphore queue */ |
proc_table[exec_shadow].status = WAIT_SEM; |
378,7 → 367,7 |
sp_table[exec_shadow].sem = *s; |
/* ...and put it in sem queue */ |
qq_insertlast(exec_shadow,&s1->blocked); |
iq_insertlast(exec_shadow,&s1->blocked); |
/* and finally we reschedule */ |
exec = exec_shadow = -1; |
476,25 → 465,14 |
if (s1->blocked.first != NIL || s1->count < n) { |
/* We must block exec task */ |
LEVEL l; /* for readableness only */ |
TIME tx; /* a dummy TIME for timespec operations */ |
struct timespec ty; /* a dummy timespec for timespec operations */ |
/* tracer */ |
trc_logevent(TRC_SEM_WAIT,s); |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
/* we insert the task in the semaphore queue */ |
proc_table[exec_shadow].status = WAIT_SEM; |
504,7 → 482,7 |
sp_table[exec_shadow].sem = *s; |
/* ...and put it in sem queue */ |
qq_insertlast(exec_shadow,&s1->blocked); |
iq_insertlast(exec_shadow,&s1->blocked); |
/* and finally we reschedule */ |
exec = exec_shadow = -1; |
554,10 → 532,10 |
s1->count -= sp_table[p].decsem; |
/* Get task from blocked queue */ |
qq_extract(p,&s1->blocked); |
iq_extract(p,&s1->blocked); |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
level_table[l]->public_unblock(l,p); |
/* only a task can be awaken */ |
/* Preempt if necessary */ |
event_need_reschedule(); |
579,10 → 557,10 |
s1->count -= sp_table[p].decsem; |
/* Get task from blocked queue */ |
qq_extract(p,&s1->blocked); |
iq_extract(p,&s1->blocked); |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
level_table[l]->public_unblock(l,p); |
/* only a task can be awaken */ |
/* Preempt if necessary */ |
scheduler(); |
627,10 → 605,10 |
s1->count -= sp_table[p].decsem; |
/* Get task from blocked queue */ |
qq_extract(p,&s1->blocked); |
iq_extract(p,&s1->blocked); |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
level_table[l]->public_unblock(l,p); |
/* Next task to wake */ |
p = s1->blocked.first; |
657,10 → 635,10 |
s1->count -= sp_table[p].decsem; |
/* Get task from blocked queue */ |
qq_extract(p,&s1->blocked); |
iq_extract(p,&s1->blocked); |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
level_table[l]->public_unblock(l,p); |
/* Next task to wake */ |
p = s1->blocked.first; |
695,16 → 673,16 |
kern_cli(); |
if (sem_table[*sem].blocked.first == NIL) |
if (iq_isempty(&sem_table[*sem].blocked)) |
/* the sem is free */ |
*sval = sem_table[*sem].count; |
else { |
/* the sem is busy */ |
*sval = 0; |
p = sem_table[*sem].blocked.first; |
p = iq_query_first(&sem_table[*sem].blocked); |
do { |
(*sval)--; |
p = proc_table[p].next; |
p = iq_query_next(p, &sem_table[*sem].blocked); |
} while (p != NIL); |
} |
/shark/tags/rel_0_3/kernel/modules/ss.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: ss.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: ss.c,v 1.4 2003-01-07 17:07:51 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:51 $ |
------------ |
This file contains the aperiodic Sporadic Server (SS). |
125,6 → 125,7 |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
/* For debugging purpose */ |
//#define DEBUG 1 |
155,7 → 156,7 |
bandwidth_t U; /*+ the used bandwidth by the server +*/ |
QQUEUE wait; /*+ the wait queue of the SS +*/ |
IQUEUE wait; /*+ the wait queue of the SS +*/ |
PID activated; /*+ the task inserted in another queue +*/ |
int flags; /*+ the init flags... +*/ |
174,7 → 175,7 |
} SS_level_des; |
/*+ function prototypes +*/ |
void SS_level_status(LEVEL l); |
void SS_internal_status(LEVEL l); |
static void SS_replenish_timer(void *arg); |
/*-------------------------------------------------------------------*/ |
313,8 → 314,8 |
if(ssq_inslast(l, lev->replenish_amount) == NIL) { |
kern_printf("SS: no more space to post replenishment\n"); |
kern_printf("You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n"); |
SS_level_status(l); |
kern_raise(XUNVALID_SS_REPLENISH,exec_shadow); |
SS_internal_status(l); |
kern_raise(XINVALID_SS_REPLENISH,exec_shadow); |
#ifdef DEBUG |
sys_abort(-1); |
exit(-1); |
324,8 → 325,8 |
} |
else { |
kern_printf("SS not active when posting R.A.\n"); |
SS_level_status(l); |
kern_raise(XUNVALID_SS_REPLENISH,exec_shadow); |
SS_internal_status(l); |
kern_raise(XINVALID_SS_REPLENISH,exec_shadow); |
#ifdef DEBUG |
sys_abort(-1); |
exit(-1); |
368,8 → 369,7 |
job_task_default_model(j,lev->lastdline); |
job_task_def_period(j,lev->period); |
level_table[m]->guest_create(m,p,(TASK_MODEL *)&j); |
level_table[m]->guest_activate(m,p); |
level_table[m]->private_insert(m,p,(TASK_MODEL *)&j); |
#ifdef DEBUG |
kern_printf("PID:%p lastdl:%d.%d ",p,lev->lastdline.tv_sec,lev->lastdline.tv_nsec); |
400,8 → 400,8 |
if(ssq_inslast(l, tx+lev->replenish_amount) == NIL) { |
kern_printf("SS: no more space to post replenishment\n"); |
kern_printf(" You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n"); |
SS_level_status(l); |
kern_raise(XUNVALID_SS_REPLENISH,exec_shadow); |
SS_internal_status(l); |
kern_raise(XINVALID_SS_REPLENISH,exec_shadow); |
#ifdef DEBUG |
sys_abort(-1); |
exit(-1); |
456,8 → 456,8 |
else { |
/* replenish queue is empty */ |
kern_printf("Replenish Timer fires but no Replenish Amount defined\n"); |
SS_level_status(l); |
kern_raise(XUNVALID_SS_REPLENISH,exec_shadow); |
SS_internal_status(l); |
kern_raise(XINVALID_SS_REPLENISH,exec_shadow); |
#ifdef DEBUG |
sys_abort(-1); |
exit(-1); |
465,13 → 465,13 |
} |
if (lev->availCs > 0 && lev->activated == NIL) { |
if (qq_queryfirst(&lev->wait) != NIL) { |
lev->activated = qq_getfirst(&lev->wait); |
if (iq_query_first(&lev->wait) != NIL) { |
lev->activated = iq_getfirst(&lev->wait); |
/* if server is active, replenish time already set */ |
if (lev->server_active == SS_SERVER_NOTACTIVE) { |
lev->server_active = SS_SERVER_ACTIVE; |
/* set replenish time */ |
ll_gettime(TIME_EXACT, &ty); |
kern_gettime(&ty); |
ADDUSEC2TIMESPEC(lev->period, &ty); |
TIMESPEC_ASSIGN(&lev->lastdline, &ty); |
#ifdef DEBUG |
488,7 → 488,7 |
static char *SS_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
return "Unavailable"; //status_to_a(status); |
switch (status) { |
case SS_WAIT : return "SS_Wait"; |
501,42 → 501,10 |
/*** Level functions ***/ |
static int SS_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
void SS_internal_status(LEVEL l) |
{ |
#ifdef DEBUG |
kern_printf("SS_levacctm cl=%d ",m->pclass); |
#endif |
if (m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l) ) { |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; |
if (s->periodicity == APERIODIC) { |
#ifdef DEBUG |
kern_printf("AcceptApe "); |
#endif |
return 0; |
} |
#ifdef DEBUG |
kern_printf("NAcceptApe "); |
#endif |
} |
#ifdef DEBUG |
kern_printf("NAccept "); |
#endif |
return -1; |
} |
static int SS_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
/* SS doesn't handles guest tasks */ |
return -1; |
} |
void SS_level_status(LEVEL l) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
PID p = qq_queryfirst(&lev->wait); |
PID p = iq_query_first(&lev->wait); |
kern_printf("On-line guarantee : %s\n", |
(lev->flags & SS_ENABLE_GUARANTEE_EDF || |
554,8 → 522,8 |
kern_printf("Activated: Pid: %d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n", |
lev->activated, |
proc_table[lev->activated].name, |
proc_table[lev->activated].timespec_priority.tv_sec, |
proc_table[lev->activated].timespec_priority.tv_nsec, |
iq_query_timespec(lev->activated,&lev->wait)->tv_sec, |
iq_query_timespec(lev->activated,&lev->wait)->tv_nsec, |
lev->nact[lev->activated], |
SS_status_to_a(proc_table[lev->activated].status)); |
564,23 → 532,12 |
p, |
proc_table[p].name, |
SS_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
p = iq_query_next(p, &lev->wait); |
} |
} |
static PID SS_level_scheduler(LEVEL l) |
static PID SS_public_schedulerbackground(LEVEL l) |
{ |
#ifdef DEBUG |
kern_printf("SS_levsch "); |
#endif |
/* the SS don't schedule anything... |
it's an RM level or similar that do it! */ |
return NIL; |
} |
static PID SS_level_schedulerbackground(LEVEL l) |
{ |
/* the SS catch the background time to exec aperiodic activities */ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
593,11 → 550,11 |
if (lev->flags & SS_BACKGROUND_BLOCK) |
return NIL; |
else |
return qq_queryfirst(&lev->wait); |
return iq_query_first(&lev->wait); |
} |
/* The on-line guarantee is enabled only if the appropriate flag is set... */ |
static int SS_level_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth) |
static int SS_public_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
613,7 → 570,7 |
return 0; |
} |
static int SS_level_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth) |
static int SS_public_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
634,17 → 591,22 |
/*** Task functions ***/ |
static int SS_task_create(LEVEL l, PID p, TASK_MODEL *m) |
static int SS_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; /* if the SS_task_create is |
called, the pclass must |
be a valid pclass. */ |
SOFT_TASK_MODEL *s; |
#ifdef DEBUG |
kern_printf("SS_taskcre "); |
#endif |
if (m->pclass != SOFT_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
s = (SOFT_TASK_MODEL *)m; |
if (s->periodicity != APERIODIC) return -1; |
s = (SOFT_TASK_MODEL *)m; |
if (s->arrivals == SAVE_ARRIVALS) |
lev->nact[p] = 0; |
else |
653,19 → 615,8 |
return 0; /* OK, also if the task cannot be guaranteed */ |
} |
static void SS_task_detach(LEVEL l, PID p) |
static void SS_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* No cleanups to do here. |
SS level doesn't introduce any dynamic allocated field. */ |
} |
static int SS_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* If the task p is chosen, it is always eligible */ |
} |
static void SS_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
struct timespec ty; |
696,7 → 647,7 |
to exe before calling task_dispatch. |
We have to check lev->activated != p instead */ |
if (lev->activated != p) { |
qq_extract(p, &lev->wait); |
iq_extract(p, &lev->wait); |
#ifdef DEBUG |
kern_printf("extr task:%d ",p); |
#endif |
706,7 → 657,7 |
if (nostop) kern_printf("(gd status=%d)",proc_table[p].status); |
#endif |
level_table[lev->scheduling_level]-> |
guest_dispatch(lev->scheduling_level,p,nostop); |
private_dispatch(lev->scheduling_level,p,nostop); |
} |
/* set capacity timer */ |
723,7 → 674,7 |
} |
} |
static void SS_task_epilogue(LEVEL l, PID p) { |
static void SS_public_epilogue(LEVEL l, PID p) { |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
struct timespec ty; |
765,8 → 716,8 |
if(ssq_inslast(l, lev->replenish_amount) == NIL) { |
kern_printf("SS: no more space to post replenishment\n"); |
kern_printf("You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n"); |
SS_level_status(l); |
kern_raise(XUNVALID_SS_REPLENISH,exec_shadow); |
SS_internal_status(l); |
kern_raise(XINVALID_SS_REPLENISH,exec_shadow); |
#ifdef DEBUG |
sys_abort(-1); |
exit(-1); |
777,9 → 728,9 |
} |
if (lev->activated == p) |
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p); |
level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p); |
qq_insertfirst(p, &lev->wait); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = SS_WAIT; |
lev->activated = NIL; |
} |
786,20 → 737,20 |
else { |
/* The task has been preempted. |
It returns into the ready queue or to the |
wait queue by calling the guest_epilogue... */ |
wait queue by calling the private_epilogue... */ |
if (lev->activated == p) { /* goes into ready queue */ |
level_table[ lev->scheduling_level ]-> |
guest_epilogue(lev->scheduling_level,p); |
private_epilogue(lev->scheduling_level,p); |
} |
else { /* goes into wait queue */ |
qq_insertfirst(p, &lev->wait); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = SS_WAIT; |
} |
} |
} |
static void SS_task_activate(LEVEL l, PID p) |
static void SS_public_activate(LEVEL l, PID p) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
struct timespec ty; |
812,8 → 763,6 |
if (lev->nact[p] != -1) lev->nact[p]++; |
} |
else if (proc_table[p].status == SLEEP) { |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
// kern_printf("-%d.%d- ",proc_table[p].request_time.tv_sec,proc_table[p].request_time.tv_nsec); |
if (lev->activated == NIL && lev->availCs > 0) { |
if(!BACKGROUND_ON) { |
/* if server is active, replenish time already set */ |
820,7 → 769,7 |
if (lev->server_active == SS_SERVER_NOTACTIVE) { |
lev->server_active = SS_SERVER_ACTIVE; |
/* set replenish time */ |
TIMESPEC_ASSIGN(&ty, &proc_table[p].request_time); |
kern_gettime(&ty); |
ADDUSEC2TIMESPEC(lev->period, &ty); |
TIMESPEC_ASSIGN(&lev->lastdline, &ty); |
#ifdef DEBUG |
833,7 → 782,7 |
SS_activation(lev); |
} |
else { |
qq_insertlast(p, &lev->wait); |
iq_insertlast(p, &lev->wait); |
proc_table[p].status = SS_WAIT; |
} |
} |
847,7 → 796,7 |
} |
} |
static void SS_task_insert(LEVEL l, PID p) |
static void SS_public_unblock(LEVEL l, PID p) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
860,11 → 809,11 |
/* when we reinsert the task into the system, the server capacity |
is always 0 because nobody executes with the SS before... */ |
qq_insertfirst(p, &lev->wait); |
iq_insertfirst(p, &lev->wait); |
proc_table[p].status = SS_WAIT; |
} |
static void SS_task_extract(LEVEL l, PID p) |
static void SS_public_block(LEVEL l, PID p) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
883,10 → 832,10 |
lev->flags |= SS_BACKGROUND_BLOCK; |
if (lev->activated == p) |
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p); |
level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p); |
} |
static void SS_task_endcycle(LEVEL l, PID p) |
static int SS_public_message(LEVEL l, PID p, void *m) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
struct timespec ty; |
910,13 → 859,13 |
} |
if (lev->activated == p) |
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p); |
level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p); |
else |
qq_extract(p, &lev->wait); |
iq_extract(p, &lev->wait); |
if (lev->nact[p] > 0) { |
lev->nact[p]--; |
qq_insertlast(p, &lev->wait); |
iq_insertlast(p, &lev->wait); |
proc_table[p].status = SS_WAIT; |
} |
else { |
923,7 → 872,7 |
proc_table[p].status = SLEEP; |
} |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated != NIL) { |
SS_activation(lev); |
} |
933,9 → 882,14 |
SS_set_ra(l); |
} |
} |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
return 0; |
} |
static void SS_task_end(LEVEL l, PID p) |
static void SS_public_end(LEVEL l, PID p) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
struct timespec ty; |
959,12 → 913,12 |
} |
if (lev->activated == p) |
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p); |
level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p); |
proc_table[p].status = FREE; |
q_insertfirst(p,&freedesc); |
iq_insertfirst(p,&freedesc); |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated != NIL) { |
SS_activation(lev); |
} |
976,134 → 930,14 |
} |
} |
static void SS_task_sleep(LEVEL l, PID p) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
struct timespec ty; |
int tx; |
#ifdef DEBUG |
kern_printf("SS_tasksle "); |
#endif |
/* update the server capacity */ |
if (BACKGROUND_ON) |
lev->flags &= ~SS_BACKGROUND; |
else { |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
lev->availCs -= tx; |
lev->replenish_amount += tx; |
#ifdef DEBUG |
kern_printf("PID:%d RA=%d ",p,lev->replenish_amount); |
#endif |
} |
lev->nact[p] = 0; |
if (lev->activated == p) |
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p); |
else |
qq_extract(p, &lev->wait); |
proc_table[p].status = SLEEP; |
lev->activated = qq_getfirst(&lev->wait); |
if (lev->activated != NIL) { |
SS_activation(lev); |
} |
else { |
if(!(BACKGROUND_ON)){ |
/* No more task to schedule; set replenish amount */ |
SS_set_ra(l); |
} |
} |
} |
static void SS_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
struct timespec ty; |
int tx; |
#ifdef DEBUG |
kern_printf("SS_tdelay "); |
#endif |
/* update the server capacity */ |
if (BACKGROUND_ON) |
lev->flags &= ~SS_BACKGROUND; |
else { |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
lev->availCs -= tx; |
lev->replenish_amount += tx; |
#ifdef DEBUG |
kern_printf("PID:%d RA=%d ",p,lev->replenish_amount); |
#endif |
/* Here set replenish amount because delay may be too long and |
replenish time could arrive */ |
SS_set_ra(l); |
} |
/* I hope no delay when owning a mutex... */ |
if (lev->activated == p) |
level_table[ lev->scheduling_level ]-> |
guest_delay(lev->scheduling_level,p,usdelay); |
} |
/*-------------------------------------------------------------------*/ |
/*** Guest functions ***/ |
/* SS doesn't handles guest tasks */ |
static int SS_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void SS_guest_detach(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_epilogue(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_activate(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_insert(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_extract(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_end(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void SS_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
/*-------------------------------------------------------------------*/ |
/*** Registration functions ***/ |
/*+ Registration function: |
int flags the init flags ... see SS.h +*/ |
void SS_register_level(int flags, LEVEL master, int Cs, int per) |
LEVEL SS_register_level(int flags, LEVEL master, int Cs, int per) |
{ |
LEVEL l; /* the level that we register */ |
SS_level_des *lev; /* for readableness only */ |
1110,63 → 944,33 |
PID i; /* a counter */ |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
#ifdef DEBUG |
kern_printf("Alloc des %d ",l); |
#endif |
l = level_alloc_descriptor(sizeof(SS_level_des)); |
/* alloc the space needed for the SS_level_des */ |
lev = (SS_level_des *)kern_alloc(sizeof(SS_level_des)); |
lev = (SS_level_des *)level_table[l]; |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
printk(" lev=%d\n",(int)lev); |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, SS_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = SS_LEVEL_CODE; |
lev->l.level_version = SS_LEVEL_VERSION; |
lev->l.level_accept_task_model = SS_level_accept_task_model; |
lev->l.level_accept_guest_model = SS_level_accept_guest_model; |
lev->l.level_status = SS_level_status; |
if (flags & SS_ENABLE_BACKGROUND) |
lev->l.level_scheduler = SS_level_schedulerbackground; |
else |
lev->l.level_scheduler = SS_level_scheduler; |
lev->l.public_scheduler = SS_public_schedulerbackground; |
if (flags & SS_ENABLE_GUARANTEE_EDF) |
lev->l.level_guarantee = SS_level_guaranteeEDF; |
lev->l.public_guarantee = SS_public_guaranteeEDF; |
else if (flags & SS_ENABLE_GUARANTEE_RM) |
lev->l.level_guarantee = SS_level_guaranteeRM; |
lev->l.public_guarantee = SS_public_guaranteeRM; |
else |
lev->l.level_guarantee = NULL; |
lev->l.public_guarantee = NULL; |
lev->l.task_create = SS_task_create; |
lev->l.task_detach = SS_task_detach; |
lev->l.task_eligible = SS_task_eligible; |
lev->l.task_dispatch = SS_task_dispatch; |
lev->l.task_epilogue = SS_task_epilogue; |
lev->l.task_activate = SS_task_activate; |
lev->l.task_insert = SS_task_insert; |
lev->l.task_extract = SS_task_extract; |
lev->l.task_endcycle = SS_task_endcycle; |
lev->l.task_end = SS_task_end; |
lev->l.task_sleep = SS_task_sleep; |
lev->l.task_delay = SS_task_delay; |
lev->l.public_create = SS_public_create; |
lev->l.public_end = SS_public_end; |
lev->l.public_dispatch = SS_public_dispatch; |
lev->l.public_epilogue = SS_public_epilogue; |
lev->l.public_activate = SS_public_activate; |
lev->l.public_unblock = SS_public_unblock; |
lev->l.public_block = SS_public_block; |
lev->l.public_message = SS_public_message; |
lev->l.guest_create = SS_guest_create; |
lev->l.guest_detach = SS_guest_detach; |
lev->l.guest_dispatch = SS_guest_dispatch; |
lev->l.guest_epilogue = SS_guest_epilogue; |
lev->l.guest_activate = SS_guest_activate; |
lev->l.guest_insert = SS_guest_insert; |
lev->l.guest_extract = SS_guest_extract; |
lev->l.guest_endcycle = SS_guest_endcycle; |
lev->l.guest_end = SS_guest_end; |
lev->l.guest_sleep = SS_guest_sleep; |
lev->l.guest_delay = SS_guest_delay; |
/* fill the SS descriptor part */ |
for (i=0; i<MAX_PROC; i++) |
1177,7 → 981,7 |
lev->period = per; |
qq_init(&lev->wait); |
iq_init(&lev->wait, &freedesc, 0); |
lev->activated = NIL; |
lev->U = (MAX_BANDWIDTH / per) * Cs; |
1195,23 → 999,19 |
lev->rcount=0; |
lev->replenish_amount=0; |
lev->server_active=SS_SERVER_NOTACTIVE; |
return l; |
} |
bandwidth_t SS_usedbandwidth(LEVEL l) |
{ |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
if (lev->l.level_code == SS_LEVEL_CODE && |
lev->l.level_version == SS_LEVEL_VERSION) |
return lev->U; |
else |
return 0; |
return lev->U; |
} |
int SS_availCs(LEVEL l) { |
SS_level_des *lev = (SS_level_des *)(level_table[l]); |
if (lev->l.level_code == SS_LEVEL_CODE && |
lev->l.level_version == SS_LEVEL_VERSION) |
return lev->availCs; |
else |
return 0; |
return lev->availCs; |
} |
/shark/tags/rel_0_3/kernel/modules/tbs.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: tbs.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: tbs.c,v 1.4 2003-01-07 17:07:51 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:51 $ |
------------ |
This file contains the aperiodic server TBS (Total Bandwidth Server) |
60,6 → 60,7 |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
/*+ 4 debug purposes +*/ |
#undef TBS_TEST |
84,7 → 85,7 |
struct timespec lastdline; /*+ the last deadline assigned to |
a TBS task +*/ |
QQUEUE wait; /*+ the wait queue of the TBS +*/ |
IQUEUE wait; /*+ the wait queue of the TBS +*/ |
PID activated; /*+ the task inserted in another queue +*/ |
int flags; /*+ the init flags... +*/ |
97,19 → 98,6 |
} TBS_level_des; |
static char *TBS_status_to_a(WORD status) |
{ |
if (status < MODULE_STATUS_BASE) |
return status_to_a(status); |
switch (status) { |
case TBS_WCET_VIOLATED: return "TBS_Wcet_Violated"; |
case TBS_WAIT : return "TBS_Wait"; |
default : return "TBS_Unknown"; |
} |
} |
#ifdef TESTG |
#include "drivers/glib.h" |
#endif |
131,9 → 119,6 |
/* we compute a suitable deadline for the task */ |
drel = (proc_table[p].wcet * lev->band_den) / lev->band_num; |
if (TIMESPEC_A_GT_B(&proc_table[p].request_time, &lev->lastdline)) |
TIMESPEC_ASSIGN(&lev->lastdline, &proc_table[p].request_time ); |
ADDUSEC2TIMESPEC(drel, &lev->lastdline); |
#ifdef TESTG |
147,8 → 132,7 |
/* and we insert the task in another level */ |
m = lev->scheduling_level; |
job_task_default_model(j,lev->lastdline); |
level_table[m]->guest_create(m,p,(TASK_MODEL *)&j); |
level_table[m]->guest_activate(m,p); |
level_table[m]->private_insert(m,p,(TASK_MODEL *)&j); |
#ifdef TBS_TEST |
kern_printf("TBS_activation: lastdline %ds %dns\n",lev->lastdline.tv_sec,lev->lastdline.tv_nsec); |
176,74 → 160,8 |
#endif |
} |
static int TBS_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
{ |
if (m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l) ) { |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; |
if (s->wcet && s->periodicity == APERIODIC) |
return 0; |
} |
return -1; |
} |
static int TBS_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
{ |
return -1; |
} |
static char *onoff(int i) |
{ |
if (i) |
return "On "; |
else |
return "Off"; |
} |
static void TBS_level_status(LEVEL l) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
PID p = qq_queryfirst(&lev->wait); |
kern_printf("Wcet Check : %s\n", |
onoff(lev->flags & TBS_ENABLE_WCET_CHECK)); |
kern_printf("On-line guarantee : %s\n", |
onoff(lev->flags & TBS_ENABLE_GUARANTEE)); |
kern_printf("Used Bandwidth : %u/%u\n", |
lev->U, MAX_BANDWIDTH); |
kern_printf("Last deadline : %lds %ldns\n",lev->lastdline.tv_sec, |
lev->lastdline.tv_nsec); |
if (lev->activated != -1) |
kern_printf("Activated: Pid: %2d Name: %10s Dl: %ld.%9ld nact: %d Stat: %s\n", |
lev->activated, |
proc_table[lev->activated].name, |
proc_table[lev->activated].timespec_priority.tv_sec, |
proc_table[lev->activated].timespec_priority.tv_nsec, |
lev->nact[lev->activated], |
TBS_status_to_a(proc_table[lev->activated].status)); |
while (p != NIL) { |
kern_printf("Pid: %2d Name: %10s Stat: %s\n", |
p, |
proc_table[p].name, |
TBS_status_to_a(proc_table[p].status)); |
p = proc_table[p].next; |
} |
} |
static PID TBS_level_scheduler(LEVEL l) |
{ |
/* the TBS don't schedule anything... |
it's an EDF level or similar that do it! */ |
return NIL; |
} |
/* The on-line guarantee is enabled only if the appropriate flag is set... */ |
static int TBS_level_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
static int TBS_public_guarantee(LEVEL l, bandwidth_t *freebandwidth) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
255,14 → 173,19 |
return 0; |
} |
static int TBS_task_create(LEVEL l, PID p, TASK_MODEL *m) |
static int TBS_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
/* if the TBS_task_create is called, then the pclass must be a |
valid pclass. */ |
SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m; |
SOFT_TASK_MODEL *s; |
if (m->pclass != SOFT_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
s = (SOFT_TASK_MODEL *)m; |
if (!(s->wcet && s->periodicity == APERIODIC)) return -1; |
proc_table[p].wcet = s->wcet; |
/* Enable wcet check */ |
278,26 → 201,8 |
return 0; /* OK, also if the task cannot be guaranteed... */ |
} |
static void TBS_task_detach(LEVEL l, PID p) |
static void TBS_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the TBS level doesn't introduce any dinamic allocated new field. */ |
} |
static int TBS_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
#ifdef __TEST1__ |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
#endif |
static void TBS_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
/* there is at least one task ready inserted in an EDF or similar |
304,20 → 209,10 |
level */ |
level_table[ lev->scheduling_level ]-> |
guest_dispatch(lev->scheduling_level,p,nostop); |
#ifdef __TEST1__ |
if (testactive) |
{ |
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time); |
s_curr[useds] = proc_table[p].avail_time; |
s_PID[useds] = p; |
useds++; |
} |
#endif |
private_dispatch(lev->scheduling_level,p,nostop); |
} |
static void TBS_task_epilogue(LEVEL l, PID p) |
static void TBS_public_epilogue(LEVEL l, PID p) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
331,7 → 226,7 |
have to be put in place... this code is identical to the |
TBS_task_end */ |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
/* we reclaim an avail time that can be <0 due to the timer |
approximations -> we have to postpone the deadline a little! |
345,7 → 240,7 |
lev->lastdline.tv_sec, lev->lastdline.tv_nsec); |
#endif |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated != NIL) |
TBS_activation(lev); |
} |
353,18 → 248,22 |
/* the task has been preempted. it returns into the ready queue by |
calling the guest_epilogue... */ |
level_table[ lev->scheduling_level ]-> |
guest_epilogue(lev->scheduling_level,p); |
private_epilogue(lev->scheduling_level,p); |
} |
static void TBS_task_activate(LEVEL l, PID p) |
static void TBS_public_activate(LEVEL l, PID p) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
struct timespec t; |
if (proc_table[p].status == SLEEP || |
proc_table[p].status == TBS_WCET_VIOLATED) { |
ll_gettime(TIME_EXACT, &proc_table[p].request_time); |
kern_gettime(&t); |
if (TIMESPEC_A_GT_B(&t, &lev->lastdline)) |
TIMESPEC_ASSIGN(&lev->lastdline, &t ); |
if (lev->activated == NIL) { |
/* This is the first task in the level, so we activate it immediately */ |
lev->activated = p; |
372,7 → 271,7 |
} |
else { |
proc_table[p].status = TBS_WAIT; |
qq_insertlast(p, &lev->wait); |
iq_insertlast(p, &lev->wait); |
} |
} |
else if (lev->flag[p] & TBS_SAVE_ARRIVALS) |
381,23 → 280,25 |
kern_printf("TBSREJ!!!");*/ |
} |
static void TBS_task_insert(LEVEL l, PID p) |
static void TBS_public_unblock(LEVEL l, PID p) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
JOB_TASK_MODEL j; |
level_table[ lev->scheduling_level ]-> |
guest_insert(lev->scheduling_level,p); |
job_task_default_model(j,lev->lastdline); |
level_table[lev->scheduling_level]-> |
private_insert(lev->scheduling_level,p,(TASK_MODEL *)&j); |
} |
static void TBS_task_extract(LEVEL l, PID p) |
static void TBS_public_block(LEVEL l, PID p) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
level_table[ lev->scheduling_level ]-> |
guest_extract(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
} |
static void TBS_task_endcycle(LEVEL l, PID p) |
static int TBS_public_message(LEVEL l, PID p, void *m) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
405,7 → 306,7 |
that implements a single activation, so we have to call |
the guest_end, that representsa single activation... */ |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
TBS_bandwidth_reclaiming(lev,p); |
417,105 → 318,38 |
// lev->nact[p] can be >0 only if the SAVE_ARRIVALS bit is set |
lev->nact[p]--; |
proc_table[p].status = TBS_WAIT; |
qq_insertlast(p, &lev->wait); |
iq_insertlast(p, &lev->wait); |
} |
else |
proc_table[p].status = SLEEP; |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated != NIL) |
TBS_activation(lev); |
jet_update_endcycle(); /* Update the Jet data... */ |
trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */ |
return 0; |
} |
static void TBS_task_end(LEVEL l, PID p) |
static void TBS_public_end(LEVEL l, PID p) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
private_extract(lev->scheduling_level,p); |
TBS_bandwidth_reclaiming(lev,p); |
proc_table[p].status = FREE; |
q_insertfirst(p,&freedesc); |
iq_insertfirst(p,&freedesc); |
lev->activated = qq_getfirst(&lev->wait); |
lev->activated = iq_getfirst(&lev->wait); |
if (lev->activated != NIL) |
TBS_activation(lev); |
} |
static void TBS_task_sleep(LEVEL l, PID p) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
/* a task activation is finished, but we are using a JOB_TASK_MODEL |
that implements a single activation, so we have to call |
the guest_end, that representsa single activation... */ |
level_table[ lev->scheduling_level ]-> |
guest_end(lev->scheduling_level,p); |
TBS_bandwidth_reclaiming(lev,p); |
/* we reset the capacity counters... */ |
if (lev->flags & TBS_ENABLE_WCET_CHECK) |
proc_table[p].avail_time = proc_table[p].wcet; |
proc_table[p].status = SLEEP; |
lev->nact[p] = 0; |
lev->activated = qq_getfirst(&lev->wait); |
if (lev->activated != NIL) |
TBS_activation(lev); |
} |
static void TBS_task_delay(LEVEL l, PID p, TIME usdelay) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
level_table[ lev->scheduling_level ]-> |
guest_delay(lev->scheduling_level,p,usdelay); |
} |
static int TBS_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void TBS_guest_detach(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_epilogue(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_activate(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_insert(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_extract(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_endcycle(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_end(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_sleep(LEVEL l, PID p) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void TBS_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_raise(XUNVALID_GUEST,exec_shadow); } |
/* Registration functions */ |
/*+ Registration function: |
529,58 → 363,28 |
printk("TBS_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(TBS_level_des)); |
printk(" alloco descrittore %d %d\n",l,(int)sizeof(TBS_level_des)); |
lev = (TBS_level_des *)level_table[l]; |
/* alloc the space needed for the TBS_level_des */ |
lev = (TBS_level_des *)kern_alloc(sizeof(TBS_level_des)); |
printk(" lev=%d\n",(int)lev); |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, TBS_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = TBS_LEVEL_CODE; |
lev->l.level_version = TBS_LEVEL_VERSION; |
lev->l.level_accept_task_model = TBS_level_accept_task_model; |
lev->l.level_accept_guest_model = TBS_level_accept_guest_model; |
lev->l.level_status = TBS_level_status; |
lev->l.level_scheduler = TBS_level_scheduler; |
if (flags & TBS_ENABLE_GUARANTEE) |
lev->l.level_guarantee = TBS_level_guarantee; |
lev->l.public_guarantee = TBS_public_guarantee; |
else |
lev->l.level_guarantee = NULL; |
lev->l.public_guarantee = NULL; |
lev->l.task_create = TBS_task_create; |
lev->l.task_detach = TBS_task_detach; |
lev->l.task_eligible = TBS_task_eligible; |
lev->l.task_dispatch = TBS_task_dispatch; |
lev->l.task_epilogue = TBS_task_epilogue; |
lev->l.task_activate = TBS_task_activate; |
lev->l.task_insert = TBS_task_insert; |
lev->l.task_extract = TBS_task_extract; |
lev->l.task_endcycle = TBS_task_endcycle; |
lev->l.task_end = TBS_task_end; |
lev->l.task_sleep = TBS_task_sleep; |
lev->l.task_delay = TBS_task_delay; |
lev->l.public_guarantee = TBS_public_guarantee; |
lev->l.public_create = TBS_public_create; |
lev->l.public_end = TBS_public_end; |
lev->l.public_dispatch = TBS_public_dispatch; |
lev->l.public_epilogue = TBS_public_epilogue; |
lev->l.public_activate = TBS_public_activate; |
lev->l.public_unblock = TBS_public_unblock; |
lev->l.public_block = TBS_public_block; |
lev->l.public_message = TBS_public_message; |
lev->l.guest_create = TBS_guest_create; |
lev->l.guest_detach = TBS_guest_detach; |
lev->l.guest_dispatch = TBS_guest_dispatch; |
lev->l.guest_epilogue = TBS_guest_epilogue; |
lev->l.guest_activate = TBS_guest_activate; |
lev->l.guest_insert = TBS_guest_insert; |
lev->l.guest_extract = TBS_guest_extract; |
lev->l.guest_endcycle = TBS_guest_endcycle; |
lev->l.guest_end = TBS_guest_end; |
lev->l.guest_sleep = TBS_guest_sleep; |
lev->l.guest_delay = TBS_guest_delay; |
/* fill the TBS descriptor part */ |
for (i = 0; i < MAX_PROC; i++) { |
590,7 → 394,7 |
NULL_TIMESPEC(&lev->lastdline); |
qq_init(&lev->wait); |
iq_init(&lev->wait, &freedesc, 0); |
lev->activated = NIL; |
lev->U = (MAX_BANDWIDTH / den) * num; |
605,20 → 409,14 |
bandwidth_t TBS_usedbandwidth(LEVEL l) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
if (lev->l.level_code == TBS_LEVEL_CODE && |
lev->l.level_version == TBS_LEVEL_VERSION) |
return lev->U; |
else |
return 0; |
return lev->U; |
} |
int TBS_get_nact(LEVEL l, PID p) |
{ |
TBS_level_des *lev = (TBS_level_des *)(level_table[l]); |
if (lev->l.level_code == TBS_LEVEL_CODE && |
lev->l.level_version == TBS_LEVEL_VERSION) |
return lev->nact[p]; |
else |
return -1; |
return lev->nact[p]; |
} |
/shark/tags/rel_0_3/kernel/modules/dummy.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: dummy.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: dummy.c,v 1.4 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.4 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
This file contains the Dummy scheduling module |
58,7 → 58,6 |
#include <ll/string.h> |
#include <kernel/config.h> |
#include <sys/types.h> |
#include <modules/codes.h> |
#include <kernel/model.h> |
#include <kernel/descr.h> |
#include <kernel/var.h> |
74,42 → 73,21 |
} dummy_level_des; |
static int dummy_level_accept_task_model(LEVEL l, TASK_MODEL *m) |
static PID dummy_public_scheduler(LEVEL l) |
{ |
dummy_level_des *lev = (dummy_level_des *)(level_table[l]); |
if ((m->pclass == DUMMY_PCLASS || m->pclass == (DUMMY_PCLASS | l)) |
&& lev->dummy == -1) |
return 0; |
else |
return -1; |
//kern_printf("DUMMYsched!!! %d", lev->dummy); |
return lev->dummy; |
} |
static int dummy_level_accept_guest_model(LEVEL l, TASK_MODEL *m) |
static int dummy_public_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
return -1; |
} |
static void dummy_level_status(LEVEL l) |
{ |
dummy_level_des *lev = (dummy_level_des *)(level_table[l]); |
kern_printf("dummy PID: %d\n", lev->dummy); |
}; |
if (m->pclass != DUMMY_PCLASS) return -1; |
if (m->level != 0 && m->level != l) return -1; |
if (lev->dummy != -1) return -1; |
static PID dummy_level_scheduler(LEVEL l) |
{ |
dummy_level_des *lev = (dummy_level_des *)(level_table[l]); |
//kern_printf("DUMMYsched!!! %d", lev->dummy); |
return lev->dummy; |
} |
/* There is not guarantee on this level!!! -> the entry must be null |
int (*level_guarantee)(LEVEL l, DWORD *freebandwidth); */ |
static int dummy_task_create(LEVEL l, PID p, TASK_MODEL *m) |
{ |
/* the dummy level doesn't introduce any new field in the TASK_MODEL |
so, all initialization stuffs are done by the task_create. |
the task state is set at SLEEP by the general task_create */ |
116,100 → 94,16 |
return 0; /* OK */ |
} |
static void dummy_task_detach(LEVEL l, PID p) |
static void dummy_public_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* the dummy level doesn't introduce any new field in the TASK_MODEL |
so, all detach stuffs are done by the task_create |
The task state is set at FREE by the general task_create */ |
} |
static int dummy_task_eligible(LEVEL l, PID p) |
{ |
return 0; /* if the task p is chosen, it is always eligible */ |
} |
extern int testactive; |
extern struct timespec s_stime[]; |
extern TIME s_curr[]; |
extern TIME s_PID[]; |
extern int useds; |
static void dummy_task_dispatch(LEVEL l, PID p, int nostop) |
{ |
/* nothing... the dummy hangs the cpu waiting for interrupts... */ |
if (0)//testactive) |
{ |
s_stime[useds]= schedule_time; |
s_curr[useds] = -1; |
s_PID[useds] = p; |
useds++; |
} |
//kern_printf("ÛDUMMYÛ"); |
} |
static void dummy_task_epilogue(LEVEL l, PID p) |
static void dummy_public_epilogue(LEVEL l, PID p) |
{ |
proc_table[p].status = SLEEP; /* Paranoia */ |
} |
static void dummy_task_activate(LEVEL l, PID p) |
{ kern_printf("Dummy1"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); } |
static void dummy_task_insert(LEVEL l, PID p) |
{ kern_printf("Dummy2"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); } |
static void dummy_task_extract(LEVEL l, PID p) |
{ kern_printf("Dummy3"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); } |
static void dummy_task_endcycle(LEVEL l, PID p) |
{ kern_printf("Dummy4"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); } |
static void dummy_task_end(LEVEL l, PID p) |
{ kern_printf("Dummy5"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); } |
static void dummy_task_sleep(LEVEL l, PID p) |
{ kern_printf("Dummy6"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); } |
static void dummy_task_delay(LEVEL l, PID p, TIME tickdelay) |
{ kern_printf("Dummy7"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); } |
static int dummy_guest_create(LEVEL l, PID p, TASK_MODEL *m) |
{ kern_printf("Dummy8"); kern_raise(XUNVALID_GUEST,exec_shadow); return 0; } |
static void dummy_guest_detach(LEVEL l, PID p) |
{ kern_printf("Dummy9"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_dispatch(LEVEL l, PID p, int nostop) |
{ kern_printf("Dummy0"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_epilogue(LEVEL l, PID p) |
{ kern_printf("Dummya"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_activate(LEVEL l, PID p) |
{ kern_printf("Dummyb"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_insert(LEVEL l, PID p) |
{ kern_printf("Dummyc"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_extract(LEVEL l, PID p) |
{ kern_printf("Dummyd"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_endcycle(LEVEL l, PID p) |
{ kern_printf("Dummye"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_end(LEVEL l, PID p) |
{ kern_printf("Dummyf"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_sleep(LEVEL l, PID p) |
{ kern_printf("Dummyg"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
static void dummy_guest_delay(LEVEL l, PID p,DWORD tickdelay) |
{ kern_printf("Dummyh"); kern_raise(XUNVALID_GUEST,exec_shadow); } |
/*+ Dummy task must be present & cannot be killed; +*/ |
static TASK dummy() |
{ |
252,7 → 146,7 |
if (p == NIL) |
printk("\nPanic!!! can't create dummy task...\n"); |
/* dummy must block all tasks... */ |
/* dummy must block all signals... */ |
proc_table[p].sigmask = 0xFFFFFFFF; |
} |
261,57 → 155,27 |
TIME slice the slice for the Round Robin queue |
int createmain 1 if the level creates the main task 0 otherwise |
struct multiboot_info *mb used if createmain specified +*/ |
void dummy_register_level() |
LEVEL dummy_register_level() |
{ |
LEVEL l; /* the level that we register */ |
dummy_level_des *lev; /* for readableness only */ |
printk("Entro in dummy_register_level\n"); |
printk("Inside dummy_register_level\n"); |
/* request an entry in the level_table */ |
l = level_alloc_descriptor(); |
l = level_alloc_descriptor(sizeof(dummy_level_des)); |
/* alloc the space needed for the dummy_level_des */ |
lev = (dummy_level_des *)kern_alloc(sizeof(dummy_level_des)); |
lev = (dummy_level_des *)level_table[l]; |
/* update the level_table with the new entry */ |
level_table[l] = (level_des *)lev; |
printk(" lev=%d\n",(int)lev); |
/* fill the standard descriptor */ |
strncpy(lev->l.level_name, DUMMY_LEVELNAME, MAX_LEVELNAME); |
lev->l.level_code = DUMMY_LEVEL_CODE; |
lev->l.level_version = DUMMY_LEVEL_VERSION; |
lev->l.public_scheduler = dummy_public_scheduler; |
lev->l.public_guarantee = NULL; |
lev->l.public_create = dummy_public_create; |
lev->l.public_dispatch = dummy_public_dispatch; |
lev->l.public_epilogue = dummy_public_epilogue; |
lev->l.level_accept_task_model = dummy_level_accept_task_model; |
lev->l.level_accept_guest_model = dummy_level_accept_guest_model; |
lev->l.level_status = dummy_level_status; |
lev->l.level_scheduler = dummy_level_scheduler; |
lev->l.level_guarantee = NULL; /* No guarantee! */ |
lev->l.task_create = dummy_task_create; |
lev->l.task_detach = dummy_task_detach; |
lev->l.task_eligible = dummy_task_eligible; |
lev->l.task_dispatch = dummy_task_dispatch; |
lev->l.task_epilogue = dummy_task_epilogue; |
lev->l.task_activate = dummy_task_activate; |
lev->l.task_insert = dummy_task_insert; |
lev->l.task_extract = dummy_task_extract; |
lev->l.task_endcycle = dummy_task_endcycle; |
lev->l.task_end = dummy_task_end; |
lev->l.task_sleep = dummy_task_sleep; |
lev->l.task_delay = dummy_task_delay; |
lev->l.guest_create = dummy_guest_create; |
lev->l.guest_detach = dummy_guest_detach; |
lev->l.guest_dispatch = dummy_guest_dispatch; |
lev->l.guest_epilogue = dummy_guest_epilogue; |
lev->l.guest_activate = dummy_guest_activate; |
lev->l.guest_insert = dummy_guest_insert; |
lev->l.guest_extract = dummy_guest_extract; |
lev->l.guest_endcycle = dummy_guest_endcycle; |
lev->l.guest_end = dummy_guest_end; |
lev->l.guest_sleep = dummy_guest_sleep; |
lev->l.guest_delay = dummy_guest_delay; |
/* the dummy process will be created at init_time. |
see also dummy_level_accept_model,dummy_create */ |
lev->dummy = -1; |
319,4 → 183,6 |
printk("\tPosto dummy_create\n"); |
sys_atrunlevel(dummy_create,(void *) l, RUNLEVEL_INIT); |
return l; |
} |
/shark/tags/rel_0_3/kernel/modules/nop.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: nop.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: nop.c,v 1.3 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
Binary Semaphores. see nop.h for more details... |
58,7 → 58,6 |
#include <ll/string.h> |
#include <kernel/const.h> |
#include <sys/types.h> |
#include <modules/codes.h> |
#include <kernel/descr.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
73,7 → 72,7 |
mutex_t structure */ |
typedef struct { |
PID owner; |
QQUEUE blocked; |
IQUEUE blocked; |
} NOP_mutex_t; |
80,40 → 79,21 |
/* Wait status for this library */ |
#define NOP_WAIT LIB_STATUS_BASE |
/*+ print resource protocol statistics...+*/ |
static void NOP_resource_status(RLEVEL r) |
static int NOP_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
kern_printf("No status for NOP module\n"); |
} |
static int NOP_level_accept_resource_model(RLEVEL l, RES_MODEL *r) |
{ |
/* priority inheritance works with all tasks without Resource parameters */ |
return -1; |
} |
static void NOP_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
/* never called!!! */ |
} |
static void NOP_res_detach(RLEVEL l, PID p) |
{ |
} |
static int NOP_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a) |
{ |
if (a->mclass == NOP_MCLASS || a->mclass == (NOP_MCLASS | l) ) |
return 0; |
else |
return -1; |
} |
static int NOP_init(RLEVEL l, mutex_t *m, const mutexattr_t *a) |
{ |
NOP_mutex_t *p; |
if (a->mclass != NOP_MCLASS) |
return -1; |
p = (NOP_mutex_t *) kern_alloc(sizeof(NOP_mutex_t)); |
124,7 → 104,7 |
return (ENOMEM); |
p->owner = NIL; |
qq_init(&p->blocked); |
iq_init(&p->blocked, &freedesc, 0); |
m->mutexlevel = l; |
m->opt = (void *)p; |
172,27 → 152,16 |
if (p->owner != NIL) { /* We must block exec task */ |
LEVEL l; /* for readableness only */ |
TIME tx; /* a dummy TIME for timespec operations */ |
struct timespec ty; /* a dummy timespec for timespec operations */ |
proc_table[exec_shadow].context = kern_context_save(); |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
/* we insert the task in the semaphore queue */ |
proc_table[exec_shadow].status = NOP_WAIT; |
qq_insertlast(exec_shadow,&p->blocked); |
iq_insertlast(exec_shadow,&p->blocked); |
/* and finally we reschedule */ |
exec = exec_shadow = -1; |
253,10 → 222,10 |
proc_table[exec_shadow].context = kern_context_save(); |
/* the mutex is mine, pop the firsttask to extract */ |
p->owner = qq_getfirst(&p->blocked); |
p->owner = iq_getfirst(&p->blocked); |
if (p->owner != NIL) { |
l = proc_table[p->owner].task_level; |
level_table[l]->task_insert(l,p->owner); |
level_table[l]->public_unblock(l,p->owner); |
} |
scheduler(); |
265,7 → 234,7 |
return 0; |
} |
void NOP_register_module(void) |
RLEVEL NOP_register_module(void) |
{ |
RLEVEL l; /* the level that we register */ |
NOP_mutex_resource_des *m; /* for readableness only */ |
282,20 → 251,11 |
resource_table[l] = (resource_des *)m; |
/* fill the resource_des descriptor */ |
strncpy(m->m.r.res_name, NOP_MODULENAME, MAX_MODULENAME); |
m->m.r.res_code = NOP_MODULE_CODE; |
m->m.r.res_version = NOP_MODULE_VERSION; |
m->m.r.rtype = MUTEX_RTYPE; |
m->m.r.resource_status = NOP_resource_status; |
m->m.r.level_accept_resource_model = NOP_level_accept_resource_model; |
m->m.r.res_register = NOP_res_register; |
m->m.r.res_detach = NOP_res_detach; |
/* fill the mutex_resource_des descriptor */ |
m->m.level_accept_mutexattr = NOP_level_accept_mutexattr; |
m->m.init = NOP_init; |
m->m.destroy = NOP_destroy; |
m->m.lock = NOP_lock; |
302,5 → 262,6 |
m->m.trylock = NOP_trylock; |
m->m.unlock = NOP_unlock; |
return l; |
} |
/shark/tags/rel_0_3/kernel/modules/npp.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: npp.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: npp.c,v 1.2 2003-01-07 17:07:50 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:07:50 $ |
------------ |
Non Preemptive Protocol. see npp.h for more details... |
56,7 → 56,6 |
#include <ll/ll.h> |
#include <ll/string.h> |
#include <ll/stdio.h> |
#include <modules/codes.h> |
#include <kernel/const.h> |
#include <sys/types.h> |
#include <kernel/descr.h> |
71,6 → 70,7 |
} NPP_mutex_resource_des; |
#if 0 |
/*+ print resource protocol statistics...+*/ |
static void NPP_resource_status(RLEVEL r) |
{ |
78,18 → 78,14 |
kern_printf("%d Resources owned by the tasks %d\n", m->nlocked, exec_shadow); |
} |
#endif |
static int NPP_level_accept_resource_model(RLEVEL l, RES_MODEL *r) |
static int NPP_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
/* NPP works with all tasks without Resource parameters */ |
return -1; |
} |
static void NPP_res_register(RLEVEL l, PID p, RES_MODEL *r) |
{ |
/* never called!!! */ |
} |
static void NPP_res_detach(RLEVEL l, PID p) |
{ |
NPP_mutex_resource_des *m = (NPP_mutex_resource_des *)(resource_table[l]); |
98,16 → 94,11 |
kern_raise(XMUTEX_OWNER_KILLED, p); |
} |
static int NPP_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a) |
static int NPP_init(RLEVEL l, mutex_t *m, const mutexattr_t *a) |
{ |
if (a->mclass == NPP_MCLASS || a->mclass == (NPP_MCLASS | l) ) |
return 0; |
else |
if (a->mclass != NPP_MCLASS) |
return -1; |
} |
static int NPP_init(RLEVEL l, mutex_t *m, const mutexattr_t *a) |
{ |
m->mutexlevel = l; |
m->opt = (void *)NIL; |
187,20 → 178,11 |
resource_table[l] = (resource_des *)m; |
/* fill the resource_des descriptor */ |
strncpy(m->m.r.res_name, NPP_MODULENAME, MAX_MODULENAME); |
m->m.r.res_code = NPP_MODULE_CODE; |
m->m.r.res_version = NPP_MODULE_VERSION; |
m->m.r.rtype = MUTEX_RTYPE; |
m->m.r.resource_status = NPP_resource_status; |
m->m.r.level_accept_resource_model = NPP_level_accept_resource_model; |
m->m.r.res_register = NPP_res_register; |
m->m.r.res_detach = NPP_res_detach; |
/* fill the mutex_resource_des descriptor */ |
m->m.level_accept_mutexattr = NPP_level_accept_mutexattr; |
m->m.init = NPP_init; |
m->m.destroy = NPP_destroy; |
m->m.lock = NPP_lock; |
/shark/tags/rel_0_3/kernel/modules/hartport.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: hartport.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: hartport.c,v 1.3 2002-11-11 08:32:06 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2002-11-11 08:32:06 $ |
------------ |
This file contains the Hartik 3.3.1 Port functions |
110,8 → 110,8 |
struct hash_port htable[MAX_HASH_ENTRY]; |
struct port_ker port_des[MAX_PORT]; |
struct port_com port_int[MAX_PORT_INT]; |
QUEUE freeportdes; |
QUEUE freeportint; |
int freeportdes; |
int freeportint; |
static int port_installed = 0; |
548,7 → 548,7 |
return -1; |
} |
if (!pd->valid) { |
errno = EPORT_UNVALID_DESCR; |
errno = EPORT_INVALID_DESCR; |
return -1; |
} |
596,7 → 596,7 |
return -1; |
} |
if (!pd->valid) { |
errno = EPORT_UNVALID_DESCR; |
errno = EPORT_INVALID_DESCR; |
return -1; |
} |
#endif |
/shark/tags/rel_0_3/kernel/modules/trcudp.c |
---|
6,7 → 6,9 |
* Paolo Gai <pj@gandalf.sssup.it> |
* |
* Authors : |
* Paolo Gai <pj@gandalf.sssup.it> |
* Massimiliano Giorgi <massy@gandalf.sssup.it> |
* Luca Abeni <luca@gandalf.sssup.it> |
* (see the web pages for full authors list) |
* |
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy) |
16,6 → 18,26 |
* http://shark.sssup.it |
*/ |
/* |
* Copyright (C) 2002 Paolo Gai |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
* |
* CVS : $Id: trcudp.c,v 1.3 2002-10-28 10:11:38 pj Exp $ |
*/ |
#include <ll/sys/types.h> |
#include <ll/stdlib.h> |
23,73 → 45,217 |
#include <kernel/mem.h> |
#include <kernel/log.h> |
#include <drivers/udpip.h> |
#include <trace/types.h> |
#include <trace/trace.h> |
#include <trace/queues.h> |
#include <fs/fs.h> |
//#define DEBUG_TRCUDP |
#include <unistd.h> |
#include <fcntl.h> |
#include <limits.h> |
#define TRCUDP_MAXEVENTS (1500/sizeof(trc_event_t)) |
//#define TRCUDP_MAXEVENTS 10 |
typedef struct TAGudp_queue_t { |
UDP_ADDR addr; |
trc_event_t evt; |
} udp_queue_t; |
/* Well... this file is very similar to trccirc.c! */ |
static trc_event_t *udp_get(udp_queue_t *queue) |
typedef struct TAGtrcudp_queue_t { |
/*+ size of the queue +*/ |
int size; |
/*+ index of the next insertion into the queue +*/ |
int index; |
/*+ index of the next item to write (if online_tracer activated) +*/ |
int windex; |
/*+ number of events lost (if online_tracer activated) +*/ |
long hoops; |
/*+ local and remote IP numbers +*/ |
UDP_ADDR local, remote; |
/*+ unique number that identify the queue +*/ |
int uniq; |
/*+ =1 when the system shuts down +*/ |
int mustgodown; |
TASK_MODEL *m; |
/*+ dummy, needed for creating a valid packet (dirty trick ;-) +*/ |
short int dummy; |
/*+ events table +*/ |
trc_event_t table[0]; |
} trcudp_queue_t; |
static TASK online_tracer(trcudp_queue_t *queue) |
{ |
return &queue->evt; |
int s; /* the socket */ |
int newwindex; /* new write index after sending the packet */ |
int n; /* number of packets to send */ |
short int *pkt; |
s = udp_bind(&queue->local, NULL); |
for (;;) { |
if (queue->index<queue->windex) { |
if (queue->windex+TRCUDP_MAXEVENTS < queue->size) { |
newwindex = queue->windex+TRCUDP_MAXEVENTS; |
n = TRCUDP_MAXEVENTS; |
} else { |
newwindex = 0; |
n = queue->size-queue->windex; |
} |
} else { |
if (queue->windex+TRCUDP_MAXEVENTS < queue->index) { |
newwindex = queue->windex+TRCUDP_MAXEVENTS; |
n = TRCUDP_MAXEVENTS; |
} else { |
newwindex = queue->index; |
n = queue->index-queue->windex; |
} |
} |
if (n) { |
/* set the number of events into the UDP packet. It works |
because the event entry before windex is always empty, or |
because we use the dummy field into the struct */ |
pkt = ((short int *)(queue->table+queue->windex))-1; |
*pkt = (short int)n; |
udp_sendto(s,(char *)pkt, |
n*sizeof(trc_event_t)+2,&queue->remote); |
#ifdef DEBUG_TRCUDP |
printk(KERN_DEBUG "UDP: SEND %d events," |
" index %d windex %d new %d!!!\n",n, |
queue->index, queue->windex, newwindex); |
#endif |
queue->windex = newwindex; |
} |
if (queue->mustgodown) { |
if (queue->windex == queue->index) |
break; |
} |
else |
task_endcycle(); |
} |
return NULL; |
} |
static int udp_post(udp_queue_t *queue) |
static trc_event_t *trcudp_get(trcudp_queue_t *queue) |
{ |
//int s=0; |
/* s ??? */ |
//udp_sendto(s,&queue->evt,sizeof(trc_event_t),&queue->addr); |
if (queue->mustgodown) |
return NULL; |
if (queue->index==queue->size-1) { |
if (queue->windex==0) { |
queue->hoops++; |
return NULL; |
} |
queue->index=0; |
return &queue->table[queue->size-1]; |
} |
if (queue->index+1==queue->windex) { |
queue->hoops++; |
return NULL; |
} |
return &queue->table[queue->index++]; |
} |
static int trcudp_post(trcudp_queue_t *queue) |
{ |
return 0; |
} |
static int udp_create(trc_queue_t *queue, TRC_UDP_PARMS *args) |
static void trcudp_shutdown(trcudp_queue_t *queue); |
static int trcudp_create(trc_queue_t *p, TRC_UDP_PARMS *args) |
{ |
udp_queue_t *ptr; |
trcudp_queue_t *queue; |
if (args==NULL) return -1; |
if (args==NULL) { |
printk(KERN_ERR "trcudp_create: you must specify a non-NULL parameter!"); |
return -1; |
} |
ptr=(udp_queue_t*)kern_alloc(sizeof(udp_queue_t)); |
if (ptr==NULL) return -1; |
queue->get=(trc_event_t*(*)(void*))udp_get; |
queue->post=(int(*)(void*))udp_post; |
queue->data=ptr; |
queue=(trcudp_queue_t*)kern_alloc(sizeof(trcudp_queue_t)+ |
sizeof(trc_event_t)*args->size); |
if (queue==NULL) { |
printk(KERN_ERR "trcudp_create: error during memory allocation!"); |
return -1; |
} |
memcpy(&ptr->addr,&args->addr,sizeof(UDP_ADDR)); |
p->get=(trc_event_t*(*)(void*))trcudp_get; |
p->post=(int(*)(void*))trcudp_post; |
p->data=queue; |
queue->size=args->size; |
queue->windex=queue->index=0; |
queue->hoops=0; |
queue->local=args->local; |
queue->remote=args->remote; |
/* uniq initialized in trcudp_activate */ |
queue->mustgodown=0; |
queue->m = args->model; |
/* dummy unused */ |
/* AFTER exit because in that way we can hope to be back in text mode... */ |
sys_atrunlevel((void (*)(void *))trcudp_shutdown, (void *)queue, RUNLEVEL_AFTER_EXIT); |
return 0; |
} |
static int udp_activate(udp_queue_t *queue) |
static int trcudp_activate(trcudp_queue_t *queue, int uniq) |
{ |
SOFT_TASK_MODEL model; |
TASK_MODEL *m; |
PID pid; |
queue->uniq=uniq; |
if (!queue->m) { |
soft_task_default_model(model); |
soft_task_def_system(model); |
/* soft_task_def_notrace(model); Should we trace the tracer? */ |
soft_task_def_periodic(model); |
soft_task_def_period(model,250000); |
soft_task_def_met(model,10000); |
soft_task_def_wcet(model,10000); |
/* soft_task_def_nokill(model); NOOOOOOO!!!! */ |
soft_task_def_arg(model,queue); |
m = (TASK_MODEL *)&model; |
} |
else { |
m = queue->m; |
task_def_arg(*m,queue); |
} |
pid=task_create("trcUDP",online_tracer,m,NULL); |
if (pid==-1) { |
printk(KERN_ERR "can't start tracer online trcudp trace task"); |
} else |
task_activate(pid); |
return 0; |
} |
static int udp_terminate(udp_queue_t *queue) |
static int trcudp_terminate(trcudp_queue_t *queue) |
{ |
queue->mustgodown = 1; |
return 0; |
} |
static void trcudp_shutdown(trcudp_queue_t *queue) |
{ |
printk(KERN_NOTICE "tracer: %li events lost into UDP queue %d", |
queue->hoops, queue->uniq); |
} |
int trc_register_udp_queue(void) |
{ |
int res; |
res=trc_register_queuetype(TRC_UDP_QUEUE, |
(int(*)(trc_queue_t*,void*))udp_create, |
(int(*)(void*))udp_activate, |
(int(*)(void*))udp_terminate |
); |
if (res!=0) printk(KERN_WARNING "can't register tracer udp queue"); |
(int(*)(trc_queue_t*,void*))trcudp_create, |
(int(*)(void*,int))trcudp_activate, |
(int(*)(void*))trcudp_terminate |
); |
if (res!=0) printk(KERN_WARNING "can't register tracer trcudp queue"); |
return res; |
} |
/shark/tags/rel_0_3/kernel/modules/cabs.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: cabs.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: cabs.c,v 1.2 2002-10-28 07:55:54 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2002-10-28 07:55:54 $ |
------------ |
Date: 2/7/96 |
95,7 → 95,7 |
static int checkcab(CAB id) |
{ |
if (id >= MAX_CAB) { |
errno = ECAB_UNVALID_ID; |
errno = ECAB_INVALID_ID; |
return -1; |
} |
if (cabs[id].busy == TRUE) return TRUE; |
117,7 → 117,7 |
} |
cabs[MAX_CAB-1].next_cab_free = NIL; |
cabs[MAX_CAB-1].busy = FALSE; |
// for (i = CAB_UNVALID_MSG_NUM; i <= CAB_CLOSED; i++) |
// for (i = CAB_INVALID_MSG_NUM; i <= CAB_CLOSED; i++) |
// exc_set(i,cab_exception); |
} |
139,7 → 139,7 |
/* solleva l'eccezioni */ |
if (num_mes < 1) { |
errno = ECAB_UNVALID_MSG_NUM; |
errno = ECAB_INVALID_MSG_NUM; |
kern_frestore(f); |
return -1; |
} |
/shark/tags/rel_0_3/kernel/modules/makefile |
---|
42,10 → 42,10 |
TRC_OBJ = trace.o \ |
trcdummy.o \ |
trcfixed.o \ |
trccirc.o |
trccirc.o \ |
trcdfix.o \ |
trcudp.o |
# trcudp.o |
OBJS = $(SCHED_OBJ) $(APER_OBJ) $(RES_OBJ) $(TRC_OBJ) |
include $(BASE)/config/lib.mk |
/shark/tags/rel_0_3/kernel/modules/trcdfix.c |
---|
0,0 → 1,152 |
/* |
* Project: S.Ha.R.K. |
* |
* Coordinators: |
* Giorgio Buttazzo <giorgio@sssup.it> |
* Paolo Gai <pj@gandalf.sssup.it> |
* |
* Authors : |
* Massimiliano Giorgi <massy@gandalf.sssup.it> |
* (see the web pages for full authors list) |
* |
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy) |
* |
* http://www.sssup.it |
* http://retis.sssup.it |
* http://shark.sssup.it |
*/ |
#include <ll/sys/types.h> |
#include <ll/stdlib.h> |
#include <kernel/func.h> |
#include <kernel/mem.h> |
#include <kernel/log.h> |
#include <trace/types.h> |
#include <trace/trace.h> |
#include <trace/queues.h> |
#include <ll/i386/x-dos.h> |
/* this file implement a fixed queue, that is simply an array that |
is filled with the events until it is full. After that, all the other |
events are discarded. It uses the DOSFS Filesystem to write all the data |
This file is derived from the trcfixed.c file; I used a different file |
because including trcfixed.c in the executable would have implied the |
linking of all the filesystem... |
*/ |
typedef struct TAGfixed_queue_t { |
int size; |
int index; |
char *filename; |
int uniq; |
trc_event_t table[0]; |
/* Yes, 0!... the elements are allocated |
in a dirty way into the kern_alloc into fixed_create */ |
} dosfs_fixed_queue_t; |
/* This function simply return an event to fill (only if the fixed table |
is not yet full) */ |
static trc_event_t *dosfs_fixed_get(dosfs_fixed_queue_t *queue) |
{ |
if (queue->index>=queue->size) return NULL; |
return &queue->table[queue->index++]; |
} |
/* since get returns the correct event address, |
the post function does nothing... */ |
static int dosfs_fixed_post(dosfs_fixed_queue_t *queue) |
{ |
return 0; |
} |
static TRC_FIXED_PARMS defaultargs; |
static int once=0; |
static void dosfs_fixed_flush(void *arg); |
static int dosfs_fixed_create(trc_queue_t *queue, TRC_FIXED_PARMS *args) |
{ |
dosfs_fixed_queue_t *ptr; |
/* initialize the default arguments for the fixed queue */ |
if (!once) { |
/* well... this func is called when the system is not running! */ |
once=1; |
trc_fixed_default_parms(defaultargs); |
} |
if (args==NULL) args=&defaultargs; |
/* allocate the fixed queue data structure plus the array of events */ |
ptr=(dosfs_fixed_queue_t*)kern_alloc(sizeof(dosfs_fixed_queue_t)+ |
sizeof(trc_event_t)*(args->size+1)); |
if (ptr==NULL) return -1; |
/* set the current queue pointers and data */ |
queue->get=(trc_event_t*(*)(void*))dosfs_fixed_get; |
queue->post=(int(*)(void*))dosfs_fixed_post; |
queue->data=ptr; |
ptr->size=args->size; |
ptr->index=0; |
ptr->filename=args->filename; |
/* prepare for shutdown ;-) */ |
sys_atrunlevel(dosfs_fixed_flush, (void *)ptr, RUNLEVEL_AFTER_EXIT); |
return 0; |
} |
static void dosfs_fixed_flush(void *arg) |
{ |
DOS_FILE *f; |
dosfs_fixed_queue_t *queue = (dosfs_fixed_queue_t *)arg; |
char pathname[100]; /* it should be PATH_MAX, but we do not use the |
filesystem, so the symbol is not defined */ |
if (queue->filename==NULL) trc_create_name("fix",queue->uniq,pathname); |
else trc_create_name(queue->filename,0,pathname); |
printk(KERN_DEBUG "tracer flush index= %d pathname=%s\n", |
queue->index, pathname); |
f = DOS_fopen(pathname,"w"); |
DOS_fwrite(queue->table,1,queue->index*sizeof(trc_event_t),f); |
DOS_fclose(f); |
} |
static int dosfs_fixed_activate(dosfs_fixed_queue_t *queue, int uniq) |
{ |
queue->uniq=uniq; |
return 0; |
} |
static int dosfs_fixed_terminate(dosfs_fixed_queue_t *queue) |
{ |
return 0; |
} |
int trc_register_dosfs_fixed_queue(void) |
{ |
int res; |
res=trc_register_queuetype(TRC_DOSFS_FIXED_QUEUE, |
(int(*)(trc_queue_t*,void*))dosfs_fixed_create, |
(int(*)(void*,int))dosfs_fixed_activate, |
(int(*)(void*))dosfs_fixed_terminate |
); |
if (res!=0) printk(KERN_WARNING "can't register tracer DOSFS fixed queue"); |
return res; |
} |
/shark/tags/rel_0_3/kernel/int_sem.c |
---|
18,11 → 18,11 |
/** |
------------ |
CVS : $Id: int_sem.c,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: int_sem.c,v 1.3 2003-01-07 17:07:49 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:49 $ |
------------ |
Internal semaphores. |
68,7 → 68,7 |
void internal_sem_init(internal_sem_t *s, int value) |
{ |
s->count = value; |
qq_init(&s->blocked); |
iq_init(&s->blocked,&freedesc,0); |
} |
void internal_sem_wait(internal_sem_t *s) |
86,27 → 86,17 |
} |
else { /* We must block exec task */ |
LEVEL l; /* for readableness only */ |
TIME tx; /* a dummy TIME for timespec operations */ |
struct timespec ty; /* a dummy timespec for timespec operations */ |
proc_table[exec_shadow].context = kern_context_save(); |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
/* we insert the task in the semaphore queue */ |
proc_table[exec_shadow].status = INTERNAL_SEM_WAIT; |
qq_insertlast(exec_shadow,&s->blocked); |
iq_insertlast(exec_shadow,&s->blocked); |
/* and finally we reschedule */ |
exec = exec_shadow = -1; |
148,9 → 138,9 |
register PID p; |
register LEVEL l; |
p = qq_getfirst(&s->blocked); |
p = iq_getfirst(&s->blocked); |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
level_table[l]->public_unblock(l,p); |
scheduler(); |
} |
/shark/tags/rel_0_3/kernel/activate.c |
---|
18,11 → 18,11 |
/** |
------------ |
CVS : $Id: activate.c,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: activate.c,v 1.3 2003-01-07 17:07:48 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:48 $ |
------------ |
task_activate & group_activate |
73,11 → 73,11 |
/* some controls on the task p */ |
if (p<0 || p>=MAX_PROC) { |
errno = EUNVALID_TASK_ID; |
errno = EINVALID_TASK_ID; |
return -1; |
} |
if (proc_table[p].status == FREE) { |
errno = EUNVALID_TASK_ID; |
errno = EINVALID_TASK_ID; |
return -1; |
} |
91,7 → 91,7 |
proc_table[p].frozen_activations++; |
else { |
l = proc_table[p].task_level; |
level_table[l]->task_activate(l,p); |
level_table[l]->public_activate(l,p); |
} |
kern_frestore(f); |
return 0; |
106,7 → 106,7 |
proc_table[p].frozen_activations++; |
else { |
l = proc_table[p].task_level; |
level_table[l]->task_activate(l,p); |
level_table[l]->public_activate(l,p); |
event_need_reschedule(); |
} |
kern_frestore(f); |
120,7 → 120,7 |
/* tracer stuff */ |
trc_logevent(TRC_ACTIVATE,&p); |
l = proc_table[p].task_level; |
level_table[l]->task_activate(l,p); |
level_table[l]->public_activate(l,p); |
/* Preempt if necessary */ |
scheduler(); |
142,7 → 142,7 |
register LEVEL l; /* a level value */ |
if (g == 0) { |
errno = EUNVALID_GROUP; |
errno = EINVALID_GROUP; |
return -1; |
} |
162,7 → 162,7 |
/* tracer stuff */ |
trc_logevent(TRC_ACTIVATE,&i); |
l = proc_table[i].task_level; |
level_table[l]->task_activate(l,i); |
level_table[l]->public_activate(l,i); |
} |
kern_frestore(f); |
181,7 → 181,7 |
/* tracer stuff */ |
trc_logevent(TRC_ACTIVATE,&i); |
l = proc_table[i].task_level; |
level_table[l]->task_activate(l,i); |
level_table[l]->public_activate(l,i); |
} |
event_need_reschedule(); |
kern_frestore(f); |
196,7 → 196,7 |
continue; |
} |
l = proc_table[i].task_level; |
level_table[l]->task_activate(l,i); |
level_table[l]->public_activate(l,i); |
/* tracer stuff */ |
trc_logevent(TRC_ACTIVATE,&i); |
} |
/shark/tags/rel_0_3/kernel/mqueue.c |
---|
18,11 → 18,11 |
/** |
------------ |
CVS : $Id: mqueue.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: mqueue.c,v 1.3 2003-01-07 17:07:49 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:49 $ |
------------ |
POSIX message queues |
90,8 → 90,8 |
correct bit is set */ |
/* the blocked processes queues */ |
QQUEUE blocked_send; |
QQUEUE blocked_rcv; |
IQUEUE blocked_send; |
IQUEUE blocked_rcv; |
int next; /* the mq queue */ |
} mq_table[MQ_OPEN_MAX]; |
105,7 → 105,7 |
if the task is not blocked...) */ |
} mqproc_table[MAX_PROC]; |
static QUEUE free_mq; /* Queue of free sem */ |
static int free_mq; /* Queue of free sem */ |
mqd_t mq_open(const char *name, int oflag, ...) |
{ |
168,8 → 168,8 |
mq_table[mq].maxmsg = MQ_DEFAULT_MAXMSG; |
mq_table[mq].msgsize = MQ_DEFAULT_MSGSIZE; |
} |
qq_init(&mq_table[mq].blocked_send); |
qq_init(&mq_table[mq].blocked_rcv); |
iq_init(&mq_table[mq].blocked_send, &freedesc, 0); |
iq_init(&mq_table[mq].blocked_rcv, &freedesc, 0); |
mq_table[mq].count = 0; |
mq_table[mq].start = -1; |
320,12 → 320,12 |
/* the task that have to be killed is waiting on a mq_send */ |
/* we have to extract the task from the blocked queue... */ |
qq_extract(i,&mq_table[mqproc_table[i].mqdes].blocked_send); |
iq_extract(i,&mq_table[mqproc_table[i].mqdes].blocked_send); |
/* and the task have to be reinserted into the ready queues, so it |
will fall into task_testcancel */ |
l = proc_table[i].task_level; |
level_table[l]->task_insert(l,i); |
level_table[l]->public_unblock(l,i); |
return 1; |
} |
334,12 → 334,12 |
/* the task that have to be killed is waiting on a mq_send */ |
/* we have to extract the task from the blocked queue... */ |
qq_extract(i, &mq_table[mqproc_table[i].mqdes].blocked_rcv); |
iq_extract(i, &mq_table[mqproc_table[i].mqdes].blocked_rcv); |
/* and the task have to be reinserted into the ready queues, so it |
will fall into task_testcancel */ |
l = proc_table[i].task_level; |
level_table[l]->task_insert(l,i); |
level_table[l]->public_unblock(l,i); |
return 1; |
} |
357,12 → 357,12 |
mqproc_table[exec_shadow].intsig = 1; |
/* we have to extract the task from the blocked queue... */ |
qq_extract(i, &mq_table[mqproc_table[i].mqdes].blocked_send); |
iq_extract(i, &mq_table[mqproc_table[i].mqdes].blocked_send); |
/* and the task have to be reinserted into the ready queues, so it |
will fall into task_testcancel */ |
l = proc_table[i].task_level; |
level_table[l]->task_insert(l,i); |
level_table[l]->public_unblock(l,i); |
return 1; |
} |
373,12 → 373,12 |
mqproc_table[exec_shadow].intsig = 1; |
/* we have to extract the task from the blocked queue... */ |
qq_extract(i, &mq_table[mqproc_table[i].mqdes].blocked_rcv); |
iq_extract(i, &mq_table[mqproc_table[i].mqdes].blocked_rcv); |
/* and the task have to be reinserted into the ready queues, so it |
will fall into task_testcancel */ |
l = proc_table[i].task_level; |
level_table[l]->task_insert(l,i); |
level_table[l]->public_unblock(l,i); |
return 1; |
} |
437,8 → 437,6 |
} |
else { |
LEVEL l; |
struct timespec ty; |
TIME tx; |
/* we block the task until: |
- a message is received, or |
447,23 → 445,14 |
mqproc_table[exec_shadow].intsig = 0; |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
/* we insert the task in the message queue */ |
proc_table[exec_shadow].status = WAIT_MQSEND; |
qq_insert(exec_shadow,&mq_table[mqdes].blocked_send); |
iq_priority_insert(exec_shadow,&mq_table[mqdes].blocked_send); |
/* and finally we reschedule */ |
exec = exec_shadow = -1; |
504,7 → 493,7 |
/* the mq was empty */ |
PID p; |
p = qq_getfirst(&mq_table[mqdes].blocked_rcv); |
p = iq_getfirst(&mq_table[mqdes].blocked_rcv); |
if ( p != NIL) { |
/* The first blocked task has to be woken up */ |
513,7 → 502,7 |
proc_table[exec_shadow].context = ll_context_from(); |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
level_table[l]->public_unblock(l,p); |
/* Preempt if necessary */ |
scheduler(); |
603,8 → 592,6 |
} |
else { |
LEVEL l; |
struct timespec ty; |
TIME tx; |
/* we block the task until: |
- a message arrives, or |
613,23 → 600,14 |
mqproc_table[exec_shadow].intsig = 0; |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
/* we insert the task into the message queue */ |
proc_table[exec_shadow].status = WAIT_MQRECEIVE; |
qq_insert(exec_shadow,&mq_table[mqdes].blocked_rcv); |
iq_priority_insert(exec_shadow,&mq_table[mqdes].blocked_rcv); |
/* and finally we reschedule */ |
exec = exec_shadow = -1; |
671,7 → 649,7 |
returnvalue = mq_table[mqdes].mq_info[ msg ].msglen; |
/* if the mq was full, there may be a task into blocked-send queue */ |
p = qq_getfirst(&mq_table[mqdes].blocked_send); |
p = iq_getfirst(&mq_table[mqdes].blocked_send); |
if ( p != NIL) { |
/* The first blocked task on send has to be woken up */ |
680,7 → 658,7 |
proc_table[exec_shadow].context = ll_context_from(); |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
level_table[l]->public_unblock(l,p); |
/* Preempt if necessary */ |
scheduler(); |
/shark/tags/rel_0_3/kernel/mutex.c |
---|
18,11 → 18,11 |
/** |
------------ |
CVS : $Id: mutex.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: mutex.c,v 1.2 2003-01-07 17:07:49 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:07:49 $ |
------------ |
This file contains the mutex and condition variables handling functions. |
89,7 → 89,7 |
int mutex_init(mutex_t *mutex, const mutexattr_t *attr) |
{ |
RLEVEL l; |
int result = (EINVAL); |
int result; |
kern_cli(); |
mutex->mutexlevel = -1; |
101,14 → 101,13 |
mutex_resource_des *m = (mutex_resource_des *)resource_table[l]; |
/* can the mutex level manage the mutexattr_t ? */ |
if (m->level_accept_mutexattr(l,attr) >=0) { |
result = m->init(l, mutex, attr); |
} |
if ((result = m->init(l, mutex, attr)) >=0) |
return result; |
} |
} |
kern_sti(); |
return result; |
return EINVAL; |
} |
/shark/tags/rel_0_3/kernel/init.c |
---|
18,24 → 18,16 |
/** |
------------ |
CVS : $Id: init.c,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: init.c,v 1.2 2003-01-07 17:07:49 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:07:49 $ |
------------ |
Kernel module registration and miscellaneous functions |
- Kernel module registration functions |
- miscellaneous functions related to module registration, system init and end |
This file contains: |
level_alloc_descriptor |
resource_alloc_descriptor |
__compute_args__ |
__call_main__ |
sys_atinit |
sys_atexit |
**/ |
/* |
72,6 → 64,10 |
#include <kernel/var.h> |
#include <kernel/func.h> |
/*********************************************************************** |
* Runlevel management |
***********************************************************************/ |
/*+ List of function to call at each rnlevel; |
they are posted with sys_atrunlevel +*/ |
static struct exit_func { |
199,23 → 195,177 |
return 0; |
} |
/*********************************************************************** |
* Level Default Descriptor |
***********************************************************************/ |
static void level_excfunc(LEVEL l) |
{ |
printk(KERN_EMERG "unreg scheduling function called, level=%d!\n", l); |
kern_raise(XINVALID_TASK, exec_shadow); |
} |
static int level_return1(void) { return 1; } |
static int level_returnminus1(void) { return -1; } |
static void level_nothing(void) { } |
static int level_return0(void) { return 0; } |
static level_des level_default_descriptor = |
{ |
(void (*)(LEVEL,PID,TASK_MODEL *))level_excfunc, /* private_insert */ |
(void (*)(LEVEL,PID)) level_excfunc, /* private_extract */ |
(int (*)(LEVEL,PID)) level_return0, /* private_eligible */ |
(void (*)(LEVEL,PID, int)) level_excfunc, /* private_dispatch */ |
(void (*)(LEVEL,PID)) level_excfunc, /* private_epilogue */ |
(PID (*)(LEVEL)) level_returnminus1, /* pubvlic_scheduler */ |
(int (*)(LEVEL,bandwidth_t *)) level_return1, /* public_guarantee */ |
(int (*)(LEVEL,PID,TASK_MODEL *))level_returnminus1, /* public_create */ |
(void (*)(LEVEL,PID)) level_nothing, /* public_detach */ |
(void (*)(LEVEL,PID)) level_excfunc, /* public_end */ |
(int (*)(LEVEL,PID)) level_return0, /* public_eligible */ |
(void (*)(LEVEL,PID, int)) level_excfunc, /* public_dispatch */ |
(void (*)(LEVEL,PID)) level_excfunc, /* public_epilogue */ |
(void (*)(LEVEL,PID)) level_excfunc, /* public_activate */ |
(void (*)(LEVEL,PID)) level_excfunc, /* public_unblock */ |
(void (*)(LEVEL,PID)) level_excfunc, /* public_block */ |
(int (*)(LEVEL,PID,void *)) level_excfunc, /* public_message */ |
}; |
/*********************************************************************** |
* Module registration |
***********************************************************************/ |
/* this function initializes all the data structures used by the level |
registration functions */ |
void levels_init(void) |
{ |
int l; |
for (l=0; l<MAX_SCHED_LEVEL; l++) { |
level_table[l] = &level_default_descriptor; |
level_used[l] = 0; |
level_next[l] = l+1; |
level_prev[l] = l-1; |
} |
level_next[MAX_SCHED_LEVEL-1l] = -1; |
level_prev[0] = -1; |
level_first = -1; |
level_last = -1; |
level_free = 0; |
} |
/*+ This function returns a level_des **. the value returned shall be |
used to register a level module. The function shall be called only at |
module registration time. It assume that the system is not yet |
initialized, so we shall not call sys_abort... +*/ |
LEVEL level_alloc_descriptor() |
used to register a level module. |
The function is usually called at module registration time. The |
function can also be called when the system is already started, to |
allow the implementation of dynamic module registration. |
The argument must be the size of the data block that have to be allocated |
The function returns the number of the descriptor allocated for the module |
or -1 in case there are no free descriptors. |
The function also reserves a descriptor with size s, initialized |
with default function pointers. |
+*/ |
LEVEL level_alloc_descriptor(size_t s) |
{ |
if (sched_levels == MAX_SCHED_LEVEL) |
{ |
printk("Too many scheduling levels!!!\n"); |
l1_exit(1); |
LEVEL l; |
/* try to find a free descriptor */ |
if (level_free == -1) |
return -1; |
/* alloc it */ |
l = level_free; |
level_free = level_next[l]; |
level_used[l] = 1; |
/* insert the module as the last in the scheduling module's list */ |
if (level_last == -1) { |
level_first = l; |
level_prev[l] = -1; |
} |
else { |
level_next[level_last] = l; |
level_prev[l] = level_last; |
} |
level_last = l; |
level_next[l] = -1; |
return sched_levels++; |
/* allocate the descriptor! */ |
if (s < sizeof(level_des)) |
s = sizeof(level_des); |
level_table[l] = (level_des *)kern_alloc(s); |
*(level_table[l]) = level_default_descriptor; |
level_size[l] = s; |
/* return the descriptor index */ |
return l; |
} |
/*+ This function release a level descriptor previously allocated using |
level_alloc_descriptor(). |
The function returns 0 if the level has been freed, or -1 if someone is |
using it, -2 if the level has never been registered. |
+*/ |
int level_free_descriptor(LEVEL l) |
{ |
if (level_used[l] == 0) |
return -2; |
else if (level_used[l] > 1) |
return -1; |
/* we can free the descriptor */ |
level_used[l] = 0; |
/* remove it from the "first" queue */ |
if (level_prev[l] == -1) |
level_first = level_next[l]; |
else |
level_next[level_prev[l]] = level_next[l]; |
if (level_next[l] == -1) |
level_last = level_prev[l]; |
else |
level_prev[level_next[l]] = level_prev[l]; |
/* ... and put it in the free queue */ |
level_prev[level_free] = l; |
level_next[l] = level_free; |
level_free = l; |
/* finally, free the memory allocated to it */ |
kern_free(level_table[l], level_size[l]); |
return 0; |
} |
/* Call this if you want to say that your module is using module l |
(e.g., for calling its private functions) */ |
int level_use_descriptor(LEVEL l) |
{ |
return ++level_used[l]; |
} |
/* Call this when you no more need the module l */ |
int level_unuse_descriptor(LEVEL l) |
{ |
return --level_used[l]; |
} |
/*+ This function returns a resource_des **. the value returned shall be |
used to register a resource module. The function shall be called only at |
module registration time. It assume that the system is not yet |
225,12 → 375,17 |
if (res_levels == MAX_RES_LEVEL) |
{ |
printk("Too many resource levels!!!\n"); |
l1_exit(1); |
sys_end(); |
} |
return res_levels++; |
} |
/*********************************************************************** |
* Parameter parsing (argc, argv) |
***********************************************************************/ |
/*+ This function compute the command line parameters from the multiboot_info |
NOTE: this function modify the multiboot struct, so this function and |
__call_main__ are mutually exclusives!!! +*/ |
/shark/tags/rel_0_3/kernel/conditio.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: conditio.c,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: conditio.c,v 1.3 2003-01-07 17:07:49 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:49 $ |
------------ |
This file contains the condition variables handling functions. |
59,6 → 59,7 |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <errno.h> |
#include <kernel/iqueue.h> |
/*---------------------------------------------------------------------*/ |
/* Condition variables */ |
76,13 → 77,13 |
/* if the task is waiting on a condition variable, we have to extract it |
from the waiters queue, then set the KILL_REQUEST flag, and reinsert |
the task into the ready queue so it can reaquire the mutex and die */ |
q_extract(i,&proc_table[i].cond_waiting->waiters); |
if (proc_table[i].cond_waiting->waiters == NIL) |
iq_extract(i,&proc_table[i].cond_waiting->waiters); |
if (iq_isempty(&proc_table[i].cond_waiting->waiters)) |
proc_table[i].cond_waiting->used_for_waiting = NULL; |
proc_table[i].cond_waiting = NULL; |
l = proc_table[i].task_level; |
level_table[l]->task_insert(l,i); |
level_table[l]->public_unblock(l,i); |
/* then, the kill_request flag is set, and when the task is rescheduled |
it autokill itself... */ |
102,7 → 103,8 |
register_cancellation_point(condition_cancellation_point, NULL); |
} |
cond->waiters = NIL; |
iq_init (&cond->waiters, &freedesc, 0); |
cond->used_for_waiting = NULL; |
return 0; |
110,7 → 112,7 |
int cond_destroy(cond_t *cond) |
{ |
if (cond->waiters != NIL) |
if (!iq_isempty(&cond->waiters)) |
return (EBUSY); |
return 0; |
123,11 → 125,11 |
proc_table[exec_shadow].context = kern_context_save(); |
if (cond->waiters != NIL) { |
p = q_getfirst(&cond->waiters); |
if (!iq_isempty(&cond->waiters)) { |
p = iq_getfirst(&cond->waiters); |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
level_table[l]->public_unblock(l,p); |
scheduler(); |
} |
143,13 → 145,13 |
proc_table[exec_shadow].context = kern_context_save(); |
if (cond->waiters != NIL) { |
if (!iq_isempty(&cond->waiters)) { |
do { |
p = q_getfirst(&cond->waiters); |
p = iq_getfirst(&cond->waiters); |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
} while(cond->waiters != NIL); |
level_table[l]->public_unblock(l,p); |
} while(!iq_isempty(&cond->waiters)); |
scheduler(); |
} |
160,8 → 162,6 |
int cond_wait(cond_t *cond, mutex_t *mutex) |
{ |
LEVEL l; |
struct timespec ty; |
TIME tx; |
/* Why I used task_nopreempt???... because we have to unlock the mutex, |
and we can't call mutex_unlock after kern_context_save (the unlock |
198,23 → 198,14 |
/* now, we really block the task... */ |
proc_table[exec_shadow].context = kern_context_save(); |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
/* we insert the task in the condition queue */ |
proc_table[exec_shadow].status = WAIT_COND; |
q_insert(exec_shadow,&cond->waiters); |
iq_priority_insert(exec_shadow,&cond->waiters); |
/* then, we set into the processor descriptor the condition on that |
the task is blocked... (if the task is killed while it is waiting |
242,7 → 233,7 |
if (proc_table[exec_shadow].cond_waiting != NULL) { |
proc_table[exec_shadow].cond_waiting = NULL; |
if (cond->waiters == NIL) cond->used_for_waiting = NULL; |
if (iq_isempty(&cond->waiters)) cond->used_for_waiting = NULL; |
} |
task_preempt(); |
268,8 → 259,8 |
PID p = (PID)arg; |
LEVEL l; |
q_extract(p,&proc_table[p].cond_waiting->waiters); |
if (proc_table[p].cond_waiting->waiters == NIL) |
iq_extract(p,&proc_table[p].cond_waiting->waiters); |
if (iq_isempty(&proc_table[p].cond_waiting->waiters)) |
proc_table[p].cond_waiting->used_for_waiting = NULL; |
proc_table[p].cond_waiting = NULL; |
276,7 → 267,7 |
proc_table[p].delay_timer = -1; |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
level_table[l]->public_unblock(l,p); |
event_need_reschedule(); |
} |
286,8 → 277,6 |
{ |
LEVEL l; |
int returnvalue = 0; |
struct timespec ty; |
TIME tx; |
/* Why I used task_nopreempt???... because we have to unlock the mutex, |
and we can't call mutex_unlock after kern_context_save (the unlock |
324,23 → 313,14 |
/* now, we really block the task... */ |
proc_table[exec_shadow].context = kern_context_save(); |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
/* we insert the task in the condition queue */ |
proc_table[exec_shadow].status = WAIT_COND; |
q_insert(exec_shadow,&cond->waiters); |
iq_priority_insert(exec_shadow,&cond->waiters); |
/* then, we set into the processor descriptor the condition on that |
the task is blocked... (if the task is killed while it is waiting |
359,7 → 339,7 |
ll_context_to(proc_table[exec_shadow].context); |
if (proc_table[exec_shadow].delay_timer != -1) |
event_delete(proc_table[exec_shadow].delay_timer); |
kern_event_delete(proc_table[exec_shadow].delay_timer); |
kern_sti(); |
379,7 → 359,7 |
if (proc_table[exec_shadow].cond_waiting != NULL) { |
proc_table[exec_shadow].cond_waiting = NULL; |
if (cond->waiters == NIL) cond->used_for_waiting = NULL; |
if (iq_isempty(&cond->waiters)) cond->used_for_waiting = NULL; |
} |
else |
/* cond_waiting == NULL if the task is killed or the timer has fired */ |
/shark/tags/rel_0_3/kernel/nanoslp.c |
---|
18,11 → 18,11 |
/** |
------------ |
CVS : $Id: nanoslp.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: nanoslp.c,v 1.2 2003-01-07 17:07:49 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:07:49 $ |
------------ |
This file contains the nanosleep function (posix 14.2.5) and related |
82,13 → 82,13 |
/* the task that have to be killed is waiting on a nanosleep */ |
/* the nanosleep event have to be removed */ |
event_delete(proc_table[i].delay_timer); |
kern_event_delete(proc_table[i].delay_timer); |
proc_table[i].delay_timer = -1; |
/* and the task have to be reinserted into the ready queues, so it |
will fall into task_testcancel */ |
l = proc_table[i].task_level; |
level_table[l]->task_insert(l,i); |
level_table[l]->public_unblock(l,i); |
return 1; |
} |
104,18 → 104,18 |
if (proc_table[i].status == WAIT_NANOSLEEP) { |
/* the task is waiting on a nanosleep and it is still receiving a |
signal... */ |
ll_gettime(TIME_EXACT,&t1); |
kern_gettime(&t1); |
SUBTIMESPEC(&nanosleep_table[i], &t1, &t2); |
TIMESPEC_ASSIGN(&nanosleep_table[i], &t2); |
/* the nanosleep event have to be removed */ |
event_delete(proc_table[i].delay_timer); |
kern_event_delete(proc_table[i].delay_timer); |
proc_table[i].delay_timer = -1; |
/* and the task have to be reinserted into the ready queues, so it |
will fall into task_testcancel */ |
l = proc_table[i].task_level; |
level_table[l]->task_insert(l,i); |
level_table[l]->public_unblock(l,i); |
return 1; |
} |
133,7 → 133,7 |
proc_table[p].delay_timer = -1; |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
level_table[l]->public_unblock(l,p); |
event_need_reschedule(); |
} |
141,8 → 141,6 |
int nanosleep(const struct timespec *rqtp, struct timespec *rmtp) |
{ |
struct timespec ty; |
TIME tx; |
LEVEL l; |
if (rqtp->tv_sec < 0 || rqtp->tv_nsec > 1000000000) |
158,20 → 156,11 |
register_interruptable_point(nanosleep_interrupted_by_signal, NULL); |
} |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
/* now, we block the current task, waiting the end of the target task */ |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
proc_table[exec_shadow].status = WAIT_NANOSLEEP; |
ADDTIMESPEC(&schedule_time, rqtp, &nanosleep_table[exec_shadow]); |
/shark/tags/rel_0_3/kernel/kill.c |
---|
18,11 → 18,11 |
/** |
------------ |
CVS : $Id: kill.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: kill.c,v 1.3 2003-01-07 17:07:49 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:49 $ |
------------ |
This file contains: |
128,7 → 128,7 |
x = proc_table[i].waiting_for_me; |
l = proc_table[x].task_level; |
level_table[l]->task_insert(l,x); |
level_table[l]->public_unblock(l,x); |
proc_table[x].shadow = x; |
} |
145,7 → 145,7 |
the task being canceled... */ |
for (p = 0; p<MAX_PROC; p++) |
if (p != i && proc_table[p].shadow == i) { |
kern_raise(XUNVALID_KILL_SHADOW,i); |
kern_raise(XINVALID_KILL_SHADOW,i); |
return; |
} |
181,7 → 181,7 |
resource_table[l]->res_detach(l,i); |
lev = proc_table[i].task_level; |
level_table[lev]->task_end(lev,i); |
level_table[lev]->public_end(lev,i); |
/* THIS ASSIGNMENT MUST STAY HERE!!! |
if we move it near the scheduler (after the counter checks) |
207,16 → 207,9 |
sys_end(); |
} |
/* SAME AS SCHEDULE, but not complete!!! */ |
ll_gettime(TIME_EXACT, &schedule_time); |
/* we don't have to manage the capacity... because we are killing |
ourselves */ |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
/* there is no epilogue... */ |
scheduler(); |
} |
250,7 → 243,7 |
kern_cli(); |
if (proc_table[i].control & NO_KILL || |
proc_table[i].status == FREE) { |
errno = EUNVALID_KILL; |
errno = EINVALID_KILL; |
kern_sti(); |
return -1; |
} |
296,7 → 289,7 |
int j; /* a counter */ |
if (g == 0) { |
errno = EUNVALID_GROUP; |
errno = EINVALID_GROUP; |
return -1; |
} |
/shark/tags/rel_0_3/kernel/time.c |
---|
18,11 → 18,11 |
/** |
------------ |
CVS : $Id: time.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: time.c,v 1.2 2003-01-07 17:07:49 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:07:49 $ |
------------ |
This file contains the functions defined in time.h |
212,7 → 212,7 |
/* delete the event if the timer is armed */ |
if (timer_table[timerid].event != -1) |
event_delete(timer_table[timerid].event); |
kern_event_delete(timer_table[timerid].event); |
if (timer_table[timerid].evp.sigev_notify == SIGEV_SIGNAL) { |
if (!(sig_queue[ timer_table[timerid].signal ].flags & SIGNAL_POSTED)) { |
354,7 → 354,7 |
NULL_TIMESPEC(&ovalue->it_value); |
else { |
/* the timer is armed, return the remaining expiration time */ |
ll_gettime(TIME_EXACT, &ct); |
kern_gettime(&ct); |
ct_read = 1; |
SUBTIMESPEC(&timer_table[timerid].current, &ct, &ovalue->it_value); |
} |
365,7 → 365,7 |
/* if it_value is 0, the timer shall be disarmed; if != 0, the timer is |
armed: in all the cases, the event must be deleted... */ |
if (timer_table[timerid].event != -1) |
event_delete(timer_table[timerid].event); |
kern_event_delete(timer_table[timerid].event); |
if (value->it_value.tv_sec != 0 || value->it_value.tv_nsec != 0) { |
/* it_value != 0 -> arm the timer! */ |
377,7 → 377,7 |
else { |
/* the time is relative to current time */ |
if (!ct_read) |
ll_gettime(TIME_EXACT, &ct); |
kern_gettime(&ct); |
ADDTIMESPEC(&ct, &value->it_value, &timer_table[timerid].current); |
} |
timer_table[timerid].event = |
415,7 → 415,7 |
NULL_TIMESPEC(&value->it_value); |
else { |
/* the timer is armed, return the remaining expiration time */ |
ll_gettime(TIME_EXACT, &ct); |
kern_gettime(&ct); |
SUBTIMESPEC(&timer_table[timerid].current, &ct, &value->it_value); |
} |
/* and return the reactivation period */ |
/shark/tags/rel_0_3/kernel/kern.c |
---|
18,11 → 18,11 |
/** |
------------ |
CVS : $Id: kern.c,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: kern.c,v 1.3 2003-01-07 17:07:49 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:49 $ |
------------ |
This file contains: |
96,7 → 96,7 |
PID exec; /*+ Task advised by the scheduler +*/ |
PID exec_shadow; /*+ Currently executing task +*/ |
QUEUE freedesc; /*+ Free descriptor handled as a queue +*/ |
IQUEUE freedesc; /*+ Free descriptor handled as a queue +*/ |
DWORD sys_tick; /*+ System tick (in usec) +*/ |
struct timespec schedule_time; |
117,9 → 117,26 |
/*+ Process descriptor table +*/ |
proc_des proc_table[MAX_PROC]; |
/*+ Level descriptor table +*/ |
/* Scheduling modules descriptor table */ |
/* ------------------------------------------------------------------------ */ |
/* the descriptor table */ |
level_des *level_table[MAX_SCHED_LEVEL]; |
/* ... and the size of each descriptor */ |
size_t level_size[MAX_SCHED_LEVEL]; |
/* an utilization counter incremented if a level is used by another module */ |
int level_used[MAX_SCHED_LEVEL]; |
/* these data structures (first, last, free, next & prev) |
are used to implement a double linked list of scheduling modules. |
That list is used by the scheduler to call the module's schedulers. */ |
int level_first; /* first module in the list */ |
int level_last; /* last module in the list */ |
int level_free; /* free single linked list of free module descriptors. */ |
int level_next[MAX_SCHED_LEVEL]; |
int level_prev[MAX_SCHED_LEVEL]; |
/* ------------------------------------------------------------------------ */ |
/*+ Resource descriptor table +*/ |
resource_des *resource_table[MAX_RES_LEVEL]; |
197,7 → 214,6 |
void scheduler(void) |
{ |
LEVEL l; /* a counter */ |
TIME tx; /* a dummy used for time computation */ |
struct timespec ty; /* a dummy used for time computation */ |
PID p; /* p is the task chosen by the level scheduler */ |
211,6 → 227,8 |
(proc_table[exec_shadow].control & NO_PREEMPT) ) ) |
return; |
// kern_printf("(!"); |
/* |
exec_shadow = exec = -1 only if the scheduler is called from: |
. task_endcycle |
229,48 → 247,38 |
- call an epilogue |
*/ |
/* then, we call the epilogue. the epilogue tipically checks the |
avail_time field... */ |
if (exec_shadow != -1) { |
// ok is set 4 debug :-( |
ok = ll_gettime(TIME_EXACT, &schedule_time); |
// kern_printf("(%d sched s%d ns%d)", ok, schedule_time.tv_sec, schedule_time.tv_nsec); |
kern_epilogue_macro(); |
/* manage the capacity event */ |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
/* if the event didn't fire before, we delete it. */ |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
/* then, we call the epilogue. the epilogue tipically checks the |
avail_time field... */ |
// kern_printf("(e%d)",exec_shadow); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_epilogue(l,exec_shadow); |
level_table[l]->public_epilogue(l,exec_shadow); |
} |
l = 0; |
// kern_printf("["); |
l = level_first; |
for(;;) { |
do { |
p = level_table[l]->level_scheduler(l); |
p = level_table[l]->public_scheduler(l); |
// kern_printf("p=%d",p); |
if (p != NIL) |
ok = level_table[ proc_table[p].task_level ]-> |
task_eligible(proc_table[p].task_level,p); |
public_eligible(proc_table[p].task_level,p); |
else |
ok = 0; |
// kern_printf(" ok=%d",ok); |
} while (ok < 0); /* repeat the level scheduler if the task isn't |
eligible... (ex. in the aperiodic servers...) */ |
if (p != NIL) break; |
l++; /* THERE MUST BE a level with a task to schedule */ |
l = level_next[l]; /* THERE MUST BE a level with a task to schedule */ |
// kern_printf(" l=%d",l); |
}; |
// kern_printf("]"); |
/* tracer stuff */ |
//trc_logevent(exec,TRC_SCHEDULE,NULL,0); |
284,15 → 292,17 |
//trc_logevent(exec_shadow,TRC_DISPATCH,NULL,0); |
if (old_exec_shadow!=exec_shadow) |
trc_logevent(TRC_SCHEDULE,&exec_shadow); |
// kern_printf("[%i->%i]",old_exec_shadow,exec_shadow); |
// kern_printf("[%i->%i]",old_exec_shadow,exec_shadow); |
/* we control the correctness of the shadows when we kill */ |
proc_table[exec_shadow].status = EXE; |
//kern_printf("(d%d)",exec_shadow); |
// kern_printf("(d%d)",exec_shadow); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_dispatch(l, exec_shadow, exec!=exec_shadow); |
level_table[l]->public_dispatch(l, exec_shadow, exec!=exec_shadow); |
// kern_printf("*"); |
/* Finally,we post the capacity event, BUT |
. only if the task require that |
. only if exec==exec_shadow (if a task is blocked we don't want |
301,13 → 311,13 |
&& exec==exec_shadow) { |
TIMESPEC_ASSIGN(&ty, &schedule_time); |
ADDUSEC2TIMESPEC(proc_table[exec_shadow].avail_time,&ty); |
// kern_printf("³s%d ns%d sched s%d ns%d³",ty.tv_sec,ty.tv_nsec, schedule_time.tv_sec, schedule_time.tv_nsec); |
// kern_printf("³s%d ns%d sched s%d ns%d³",ty.tv_sec,ty.tv_nsec, schedule_time.tv_sec, schedule_time.tv_nsec); |
cap_timer = kern_event_post(&ty, capacity_timer, NULL); |
} |
/* set the time at witch the task is scheduled */ |
TIMESPEC_ASSIGN(&cap_lasttime, &schedule_time); |
//if (runlevel != 1) kern_printf("(s%d)",exec_shadow); |
// kern_printf("(s%d)",exec_shadow); |
} |
325,8 → 335,8 |
bandwidth_t num=MAX_BANDWIDTH; |
int l; |
for (l =0; l<MAX_SCHED_LEVEL && level_table[l]->level_guarantee; l++) |
if (!level_table[l]->level_guarantee(l,&num)) |
for (l =0; l<MAX_SCHED_LEVEL && level_table[l]->public_guarantee; l++) |
if (!level_table[l]->public_guarantee(l,&num)) |
return -1; |
return 0; /* OK */ |
378,7 → 388,7 |
* |
*/ |
runlevel = 0; |
runlevel = RUNLEVEL_STARTUP; |
/* The kernel startup MUST proceed with int disabled! */ |
kern_cli(); |
400,13 → 410,12 |
proc_table[i].frozen_activations = 0; |
proc_table[i].sigmask = 0; |
proc_table[i].sigpending = 0; |
NULL_TIMESPEC(&proc_table[i].request_time); |
proc_table[i].avail_time = 0; |
proc_table[i].shadow = i; |
proc_table[i].cleanup_stack= NULL; |
proc_table[i].errnumber = 0; |
proc_table[i].priority = 0; |
NULL_TIMESPEC(&proc_table[i].timespec_priority); |
//proc_table[i].priority = 0; |
//NULL_TIMESPEC(&proc_table[i].timespec_priority); |
proc_table[i].delay_timer = -1; |
proc_table[i].wcet = -1; |
424,12 → 433,17 |
for (j=0; j<PTHREAD_KEYS_MAX; j++) |
proc_table[i].keys[j] = NULL; |
} |
for (i = 0; i < MAX_PROC-1; i++) proc_table[i].next = i+1; |
proc_table[MAX_PROC-1].next = NIL; |
for (i = MAX_PROC-1; i > 0; i--) proc_table[i].prev = i-1; |
proc_table[0].prev = NIL; |
freedesc = 0; |
/* set up the free descriptor queue */ |
// for (i = 0; i < MAX_PROC-1; i++) proc_table[i].next = i+1; |
// proc_table[MAX_PROC-1].next = NIL; |
// for (i = MAX_PROC-1; i > 0; i--) proc_table[i].prev = i-1; |
// proc_table[0].prev = NIL; |
// freedesc = 0; |
iq_init(&freedesc, NULL, 0); |
for (i = 0; i < MAX_PROC; i++) |
iq_insertlast(i,&freedesc); |
/* Set up the varius stuff */ |
global_errnumber = 0; |
task_counter = 0; |
451,7 → 465,11 |
/* Init VM layer (Interrupts, levels & memory management) */ |
/* for old exception handling, use excirq_init() */ |
signals_init(); |
set_default_exception_handler(); |
/* Clear scheduling modules registration data */ |
levels_init(); |
sys_tick = __kernel_register_levels__(multiboot); |
/* tracer stuff */ |
476,7 → 494,7 |
parms.tick = sys_tick; |
/* |
* Runlevel 1: Let's go!!!! |
* Runlevel INIT: Let's go!!!! |
* |
* |
*/ |
492,6 → 510,17 |
/* call the init functions */ |
call_runlevel_func(RUNLEVEL_INIT, 0); |
/* |
* Runlevel RUNNING: Hoping that all works fine ;-) |
* |
* |
*/ |
runlevel = RUNLEVEL_RUNNING; |
/* reset keyboard after exit */ |
// sys_atexit((void(*)(void *))C8042_restore,NULL,AFTER_EXIT); |
499,7 → 528,7 |
trc_resume(); |
/* exec and exec_shadow are already = -1 */ |
ll_gettime(TIME_EXACT, &schedule_time); |
kern_gettime(&schedule_time); |
scheduler(); |
global_context = ll_context_from(); /* It will be used by sys_end */ |
ll_context_to(proc_table[exec_shadow].context); |
514,7 → 543,7 |
/* |
* Runlevel 2: Shutting down the system... :-( |
* Runlevel SHUTDOWN: Shutting down the system... :-( |
* |
* |
*/ |
530,13 → 559,11 |
/* 1 when the error code is != 0 */ |
aborting = global_errnumber > 0; |
//kern_printf("after - system_counter=%d, task_counter = %d\n", |
// system_counter,task_counter); |
//kern_printf("after - system_counter=%d, task_counter = %d\n", system_counter,task_counter); |
call_runlevel_func(RUNLEVEL_SHUTDOWN, aborting); |
//kern_printf("before - system_counter=%d, task_counter = %d\n", |
// system_counter,task_counter); |
//kern_printf("before - system_counter=%d, task_counter = %d\n", system_counter,task_counter); |
if (system_counter) { |
/* To shutdown the kernel correctly, we have to wait that all the SYSTEM |
547,15 → 574,15 |
We do nothing for user tasks that remain active (because, for example, |
they have the cancelability set to deferred) when the system goes to |
runlevel 3 */ |
//kern_printf("Û%lu",ll_gettime(TIME_EXACT,NULL)); |
//kern_printf("Û%lu",kern_gettime(NULL)); |
kill_user_tasks(); |
//kern_printf("Û%lu",ll_gettime(TIME_EXACT,NULL)); |
//kern_printf("Û%lu",kern_gettime(NULL)); |
/* we have to go again in multitasking mode!!! */ |
mustexit = 0; |
/* exec and exec_shadow are already = -1 */ |
ll_gettime(TIME_EXACT, &schedule_time); |
kern_gettime(&schedule_time); |
global_context = ll_context_from(); /* It will be used by sys_end */ |
scheduler(); |
568,7 → 595,7 |
/* |
* Runlevel 3: Before Halting the system |
* Runlevel BEFORE_EXIT: Before Halting the system |
* |
* |
*/ |
592,7 → 619,7 |
/* |
* Runlevel 4: After halting... |
* Runlevel AFTER_EXIT: After halting... |
* |
* |
*/ |
615,47 → 642,48 |
} |
/* IMPORTANT!!! |
I'm almost sure the shutdown procedure does not work into interrupts. */ |
void internal_sys_end(int i) |
{ |
LEVEL l; /* a counter */ |
TIME tx; /* a dummy used for time computation */ |
struct timespec ty; /* a dummy used for time computation */ |
/* if something goes wron during the real mode */ |
if (runlevel==RUNLEVEL_STARTUP || runlevel==RUNLEVEL_AFTER_EXIT) |
l1_exit(i); |
//kern_printf("mustexit=%d",mustexit); |
if (!mustexit) { |
if (!ll_ActiveInt()) |
proc_table[exec_shadow].context = kern_context_save(); |
global_errnumber = i; |
if (mustexit) |
return; |
mustexit = 1; |
mustexit = 1; |
global_errnumber = i; |
if (!ll_ActiveInt()) { |
proc_table[exec_shadow].context = kern_context_save(); |
if (exec_shadow != -1) { |
ll_gettime(TIME_EXACT, &schedule_time); |
/* manage the capacity event */ |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
/* if the event didn't fire before, we delete it. */ |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_gettime(&schedule_time); |
kern_epilogue_macro(); |
/* then, we call the epilogue. the epilogue typically checks the |
avail_time field... */ |
avail_time field... */ |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_epilogue(l,exec_shadow); |
level_table[l]->public_epilogue(l,exec_shadow); |
exec_shadow = exec = -1; |
} |
kern_context_load(global_context); |
} |
if (ll_ActiveInt()) |
ll_context_to(global_context); |
else |
kern_context_load(global_context); |
if (ll_ActiveInt()) { |
ll_context_to(global_context); |
/* The context change will be done when all the interrupts end!!! */ |
} |
//kern_printf("fine sysend"); |
/* the control reach this line only if we call sys_end() into an event |
664,64 → 692,29 |
} |
/*+ Close the system & return to HOST OS. |
Can be called from all the tasks... |
The first time it is called it jumps to the global context |
The second time it jumps only if there are no system task remaining |
The error code passed is 0... (it is saved on the first call!!!) +*/ |
void sys_end(void) |
/* |
Close the system & return to HOST OS. |
Can be called from tasks and from ISRS |
*/ |
void sys_abort(int err) |
{ |
SYS_FLAGS f; |
/* the sys_end change the context to the global context. |
when the first time is called, it simply kills all the users tasks |
and waits the system tasks to end... */ |
/*kern_printf("°sys_end %d°",exec_shadow);*/ |
/*return;*/ |
f = kern_fsave(); |
if (runlevel != RUNLEVEL_INIT && system_counter) { |
kern_frestore(f); |
return; |
} |
internal_sys_end(0); |
internal_sys_end(err); |
kern_frestore(f); |
} |
/*+ Close the system & return to HOST OS. |
Can be called from all the tasks... |
The first time it is called it works as the sys_end |
The second time it jumps every time |
The error code passed is 0... +*/ |
void sys_abort(int err) |
void sys_end(void) |
{ |
/* the sys_end change the context to the global context. |
when the first time is called, it simply kills all the users tasks |
and waits the system tasks to end... */ |
internal_sys_end(err); |
sys_abort(0); |
} |
/*+ equal to sys_end! +*/ |
void _exit(int status) |
{ |
SYS_FLAGS f; |
/* the sys_end change the context to the global context. |
when the first time is called, it simply kills all the users tasks |
and waits the system tasks to end... */ |
/*kern_printf("°sys_end %d°",exec_shadow);*/ |
/*return;*/ |
f = kern_fsave(); |
if (runlevel != RUNLEVEL_INIT && system_counter) { |
kern_frestore(f); |
return; |
} |
internal_sys_end(status); |
kern_frestore(f); |
sys_abort(status); |
} |
741,7 → 734,7 |
TIME x; |
f = kern_fsave(); |
x = ll_gettime(TIME_EXACT,t); |
x = kern_gettime(t); |
kern_frestore(f); |
return x; |
/shark/tags/rel_0_3/kernel/join.c |
---|
18,11 → 18,11 |
/** |
------------ |
CVS : $Id: join.c,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: join.c,v 1.3 2003-01-07 17:07:49 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:49 $ |
------------ |
task join and related primitives |
80,7 → 80,7 |
proc_table[i].shadow = i; |
l = proc_table[i].task_level; |
level_table[l]->task_insert(l,i); |
level_table[l]->public_unblock(l,i); |
return 1; |
} |
96,8 → 96,6 |
{ |
PID x; /* used to follow the shadow chain */ |
int blocked = 0; /* a flag */ |
struct timespec ty; |
TIME tx; |
LEVEL l; |
/* task_join is a cancellation point... if the task is suspended |
153,20 → 151,11 |
proc_table[p].waiting_for_me = exec_shadow; |
proc_table[exec_shadow].shadow = p; |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
/* now, we block the current task, waiting the end of the target task */ |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
proc_table[exec_shadow].status = WAIT_JOIN; |
exec = exec_shadow = -1; |
188,7 → 177,7 |
queue */ |
proc_table[p].control &= ~WAIT_FOR_JOIN; |
if (proc_table[p].control & DESCRIPTOR_DISCARDED) |
q_insertfirst(p, &freedesc); |
iq_insertfirst(p, &freedesc); |
if (value) |
*value = proc_table[p].return_value; |
/shark/tags/rel_0_3/kernel/tpreempt.c |
---|
18,11 → 18,11 |
/** |
------------ |
CVS : $Id: tpreempt.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: tpreempt.c,v 1.2 2003-01-07 17:07:49 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:07:49 $ |
------------ |
task_preempt and task_nopreempt |
72,7 → 72,7 |
kern_cli(); |
proc_table[exec_shadow].control |= NO_PREEMPT; |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
kern_event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_sti(); |
/shark/tags/rel_0_3/kernel/grpcreat.c |
---|
18,11 → 18,11 |
/** |
------------ |
CVS : $Id: grpcreat.c,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: grpcreat.c,v 1.3 2003-01-07 17:07:49 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:49 $ |
------------ |
This file contains: |
116,7 → 116,7 |
/* Get a free descriptor */ |
for (;;) { |
i = q_getfirst(&freedesc); |
i = iq_getfirst(&freedesc); |
/* If no one is available abort the system */ |
if (i == NIL) { |
145,7 → 145,7 |
proc_table[i].sigpending = 0; /* No pending signal for new tasks*/ |
proc_table[i].shadow = i; |
proc_table[i].cleanup_stack = NULL; |
proc_table[i].next = proc_table[i].prev = NIL; |
// proc_table[i].next = proc_table[i].prev = NIL; |
proc_table[i].errnumber = 0; /* meaningless value */ |
/* Fill jet info */ |
168,7 → 168,6 |
- master_level (initialized later, modified by l[]->task_create() ) |
- task_level (initialized later in this function) |
- context, stack (initialized at the end of this function) |
- request_time (initialized when a request (activation) is issued) |
- additional stuff like priority & co. have to be init. only if used...) |
- delay_timer (initialized in __kernel_init__ and mantained coherent |
by the scheduling modules...) |
176,13 → 175,14 |
*/ |
/* search for a level that can manage the task model */ |
for (l=0; l<sched_levels; l++) |
if (level_table[l]->level_accept_task_model(l,m) >= 0) |
for (l=level_first; l != -1; l=level_next[l]) |
if (level_table[l]->public_create(l,i,m) >= 0) |
break; |
if (l == sched_levels) { |
if (l == -1) { |
/* no level can accept the task_model, exit!!! */ |
proc_table[i].status = FREE; |
q_insertfirst(i,&freedesc); |
iq_insertfirst(i,&freedesc); |
errno = ENO_AVAIL_SCHEDLEVEL; |
return -1; |
} |
190,15 → 190,6 |
/* initialize task level */ |
proc_table[i].task_level = l; |
/* calls the task-oriented function task_create */ |
if (level_table[l]->task_create(l,i,m) < 0) { |
/* an error occurred in the task_create */ |
proc_table[i].status = FREE; |
q_insertfirst(i,&freedesc); |
errno = ETASK_CREATE; |
return -1; |
} |
/* register all the resource models passed */ |
for (;;) { |
r = va_arg(rlist,RES_MODEL *); |
207,10 → 198,8 |
/* search for a level that can manage the resource model */ |
for (l_res=0; l_res<res_levels; l_res++) |
if (resource_table[l_res]->level_accept_resource_model(l_res,r) >= 0) { |
resource_table[l_res]->res_register(l_res, i, r); |
if (resource_table[l_res]->res_register(l_res, i, r) >= 0) |
break; |
} |
if (l_res == res_levels) { |
/* no level can accept the resource_model, exit!!! */ |
/* detach the resources and the task */ |
340,11 → 329,11 |
resource_table[lr]->res_detach(lr,i); |
l = proc_table[i].task_level; |
level_table[l]->task_detach(l,i); |
level_table[l]->public_detach(l,i); |
proc_table[i].status = FREE; |
q_insertfirst(i,&freedesc); |
iq_insertfirst(i,&freedesc); |
} |
379,7 → 368,7 |
va_end(rlist); |
if (p != NIL) { |
if (level_table[proc_table[p].task_level]->level_guarantee) |
if (level_table[proc_table[p].task_level]->public_guarantee) |
if (guarantee() < 0) { |
group_create_reject(p); |
errno = ENO_GUARANTEE; |
/shark/tags/rel_0_3/kernel/tskmsg.c |
---|
0,0 → 1,104 |
/* |
* Project: S.Ha.R.K. |
* |
* Coordinators: |
* Giorgio Buttazzo <giorgio@sssup.it> |
* Paolo Gai <pj@gandalf.sssup.it> |
* |
* Authors : |
* Paolo Gai <pj@gandalf.sssup.it> |
* (see the web pages for full authors list) |
* |
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy) |
* |
* http://www.sssup.it |
* http://retis.sssup.it |
* http://shark.sssup.it |
*/ |
/** |
------------ |
CVS : $Id: tskmsg.c,v 1.1 2003-01-07 17:09:24 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1 $ |
Last update: $Date: 2003-01-07 17:09:24 $ |
------------ |
**/ |
/* |
* Copyright (C) 2002 Paolo Gai |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
* |
*/ |
#include <stdarg.h> |
#include <ll/ll.h> |
#include <ll/stdlib.h> |
#include <ll/stdio.h> |
#include <ll/string.h> |
#include <kernel/config.h> |
#include <kernel/model.h> |
#include <kernel/const.h> |
#include <sys/types.h> |
#include <kernel/types.h> |
#include <kernel/descr.h> |
#include <errno.h> |
#include <kernel/var.h> |
#include <kernel/func.h> |
#include <kernel/trace.h> |
/* |
The running task (pointed by exec_shadow) sent a message m to the |
scheduling module that handle the task p. |
If the message has value NULL the behavior should be the |
task_endcycle primitive behavior, and an endcycle tracer event is |
generated. |
*/ |
/*
 * Sends the message m from the running task (pointed by exec_shadow)
 * to the scheduling module that handles it, via the level's
 * public_message() hook.
 *
 * m          - module-specific message; NULL conventionally means
 *              "endcycle" (see comment above in the original file)
 * reschedule - nonzero: save the current context, run the kernel
 *              epilogue, deliver the message, then invoke the
 *              scheduler and switch to the newly selected task.
 *              zero: deliver the message under interrupt protection
 *              without rescheduling.
 *
 * Returns the value produced by the level's public_message() call.
 */
int task_message(void *m, int reschedule)
{
  LEVEL l;       /* task level of the running task (readability only) */
  int retvalue;  /* value returned by public_message() */

  if (reschedule) {
    /* full path: context save + capacity/JET accounting (epilogue),
       then message delivery and a fresh scheduling decision */
    proc_table[exec_shadow].context = kern_context_save();

    kern_epilogue_macro();

    l = proc_table[exec_shadow].task_level;
    retvalue = level_table[l]->public_message(l,exec_shadow,m);

    /* no running task until the scheduler picks one */
    exec = exec_shadow = -1;

    scheduler();
    kern_context_load(proc_table[exec_shadow].context);
  } else {
    /* lightweight path: just deliver the message atomically */
    SYS_FLAGS f;

    f = kern_fsave();
    l = proc_table[exec_shadow].task_level;
    retvalue = level_table[l]->public_message(l,exec_shadow,m);
    kern_frestore(f);
  }

  return retvalue;
}
/shark/tags/rel_0_3/kernel/exchand.c |
---|
0,0 → 1,104 |
/* |
* Project: S.Ha.R.K. |
* |
* Coordinators: |
* Giorgio Buttazzo <giorgio@sssup.it> |
* Paolo Gai <pj@gandalf.sssup.it> |
* |
* Authors : |
* Paolo Gai <pj@gandalf.sssup.it> |
* (see the web pages for full authors list) |
* |
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy) |
* |
* http://www.sssup.it |
* http://retis.sssup.it |
* http://shark.sssup.it |
*/ |
/** |
------------ |
CVS : $Id: exchand.c,v 1.1 2003-01-07 17:09:23 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1 $ |
Last update: $Date: 2003-01-07 17:09:23 $ |
------------ |
**/ |
/* |
* Copyright (C) 2000 Paolo Gai |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
* |
*/ |
#include <kernel/kern.h> |
static int myflag; |
static siginfo_t myinfo; |
static struct timespec mytime; |
static void thehandler(int signo, siginfo_t *info, void *extra); |
static void theend(void *arg); |
/* |
This exception handler should be good for text applications that do NOT |
use graphics |
*/ |
int set_default_exception_handler(void) |
{ |
struct sigaction action; |
myflag = 0; |
sys_atrunlevel(theend, NULL, RUNLEVEL_AFTER_EXIT); |
/* Init the standard S.Ha.R.K. exception handler */ |
action.sa_flags = SA_SIGINFO; /* Set the signal action */ |
action.sa_sigaction = thehandler; |
action.sa_handler = 0; |
sigfillset(&action.sa_mask); /* we block all the other signals... */ |
return sigaction(SIGHEXC, &action, NULL); /* set the signal */ |
} |
/*
 * SIGHEXC handler: records the first exception (its siginfo and the
 * time it occurred) and aborts the system. Subsequent exceptions are
 * ignored so the first report is not overwritten.
 *
 * Fix: the original used a comma operator ("sys_gettime(&mytime),")
 * where a statement terminator was clearly intended; behavior was the
 * same, but the typo hid the statement boundary.
 */
static void thehandler(int signo, siginfo_t *info, void *extra)
{
  if (!myflag) {
    myflag = 1;
    myinfo = *info;
    sys_gettime(&mytime);
    sys_abort(AHEXC);
  }
}
static void theend(void *arg) |
{ |
if (myflag) { |
kern_printf("\nS.Ha.R.K. Exception raised!!!" |
"\nTime (s:ns) :%ld:%ld" |
"\nException number:%d" |
"\nPID :%d\n", |
mytime.tv_sec, |
mytime.tv_nsec, |
myinfo.si_value.sival_int, |
myinfo.si_task); |
} |
} |
/shark/tags/rel_0_3/kernel/signal.c |
---|
18,11 → 18,11 |
/** |
------------ |
CVS : $Id: signal.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
CVS : $Id: signal.c,v 1.3 2003-01-07 17:07:49 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:52 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:07:49 $ |
------------ |
This file contains: |
108,7 → 108,7 |
* A queue of all threads waiting in sigwait. |
* It is not static because it is used in task_kill... |
*/ |
static QUEUE sigwaiters; |
static IQUEUE sigwaiters; |
/*+ An array of queues of pending signals posted with sigqueue(). +*/ |
331,10 → 331,10 |
LEVEL l; |
/* Reactivate the task... */ |
q_extract(p, &sigwaiters); |
iq_extract(p, &sigwaiters); |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
level_table[l]->public_unblock(l,p); |
} |
469,9 → 469,9 |
* in sigwait will have blocked the signals being waited for). |
*/ |
for (task = sigwaiters; |
for (task = iq_query_first(&sigwaiters); |
task != NIL; |
task = proc_table[task].next) { |
task = iq_query_next(task, &sigwaiters)) { |
if (sigismember(&proc_table[task].sigwaiting, signo)) { |
LEVEL l; |
479,12 → 479,12 |
sigaddset(&proc_table[task].sigpending, signo); |
/* Reactivate the task... */ |
q_extract(task, &sigwaiters); |
iq_extract(task, &sigwaiters); |
l = proc_table[task].task_level; |
level_table[l]->task_insert(l,task); |
level_table[l]->public_unblock(l,task); |
if (proc_table[task].delay_timer != -1) { |
event_delete(proc_table[task].delay_timer); |
kern_event_delete(proc_table[task].delay_timer); |
proc_table[task].delay_timer = -1; |
} |
614,9 → 614,9 |
* the FIFO order, and how to prevent lost signals in the case that |
* a thread calls sigwait before the woken thread runs and gets it. |
*/ |
for (task = sigwaiters; |
for (task = iq_query_first(&sigwaiters); |
task != NIL; |
task = proc_table[task].next) { |
task = iq_query_next(task, &sigwaiters)) { |
if (sigismember(&proc_table[task].sigwaiting, signo)) { |
LEVEL l; |
624,13 → 624,13 |
sigaddset(&proc_table[task].sigpending, signo); |
/* Reactivate the task... */ |
q_extract(task, &sigwaiters); |
iq_extract(task, &sigwaiters); |
l = proc_table[task].task_level; |
level_table[l]->task_insert(l,task); |
level_table[l]->public_unblock(l,task); |
if (proc_table[task].delay_timer != -1) { |
event_delete(proc_table[task].delay_timer); |
kern_event_delete(proc_table[task].delay_timer); |
proc_table[task].delay_timer = -1; |
} |
697,10 → 697,10 |
proc_table[p].control |= SIGTIMEOUT_EXPIRED; |
/* insert the task into the ready queue and extract it from the waiters */ |
q_extract(p, &sigwaiters); |
iq_extract(p, &sigwaiters); |
l = proc_table[p].task_level; |
level_table[l]->task_insert(l,p); |
level_table[l]->public_unblock(l,p); |
event_need_reschedule(); |
} |
718,8 → 718,6 |
proc_des *pthread = &proc_table[exec_shadow]; |
int thissig; |
struct timespec ty; |
TIME tx; |
LEVEL l; |
task_testcancel(); |
754,7 → 752,7 |
* Grab the first queue entry. |
*/ |
sos = sigqueued[thissig]; |
sigqueued[thissig] = sig_queue[sigqueued[thissig]].next; |
sigqueued[thissig] = sig_queue[sos].next; |
/* |
* If that was the last one, reset the process procsigpending. |
814,18 → 812,10 |
/* now, we really block the task... */ |
proc_table[exec_shadow].context = kern_context_save(); |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
/* |
* Add this thread to the list of threads in sigwait. Once that is |
834,7 → 824,7 |
* find a thread in sigwait, but it will not be able to wake it up |
* until the waitlock is released in the switch code. |
*/ |
q_insertfirst(exec_shadow, &sigwaiters); |
iq_insertfirst(exec_shadow, &sigwaiters); |
proc_table[exec_shadow].status = WAIT_SIG; |
if (timeout) { |
841,7 → 831,7 |
/* we can use the delaytimer because if we are here we are not in a |
task_delay */ |
struct timespec t, abstime; |
ll_gettime(TIME_EXACT, &t); |
kern_gettime(&t); |
ADDTIMESPEC(&t, timeout, &abstime); |
proc_table[exec_shadow].delay_timer = |
890,7 → 880,8 |
/* |
* Grab the first queue entry. |
*/ |
sos = q_getfirst(&sigqueued[thissig]); |
sos = sigqueued[thissig]; |
sigqueued[thissig] = sig_queue[sos].next; |
/* |
* If that was the last one, reset the process procsigpending. |
1026,8 → 1017,6 |
{ |
proc_des *pthread = &proc_table[exec_shadow]; |
struct timespec ty; |
TIME tx; |
LEVEL l; |
task_testcancel(); |
1055,20 → 1044,11 |
/* now, we really block the task... */ |
proc_table[exec_shadow].context = kern_context_save(); |
/* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
ll_gettime(TIME_EXACT, &schedule_time); |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
if (cap_timer != NIL) { |
event_delete(cap_timer); |
cap_timer = NIL; |
} |
kern_epilogue_macro(); |
l = proc_table[exec_shadow].task_level; |
level_table[l]->task_extract(l,exec_shadow); |
level_table[l]->public_block(l,exec_shadow); |
q_insertfirst(exec_shadow, &sigwaiters); |
iq_insertfirst(exec_shadow, &sigwaiters); |
proc_table[exec_shadow].status = WAIT_SIGSUSPEND; |
/* and finally we reschedule */ |
1115,7 → 1095,7 |
kern_cli(); |
ll_gettime(TIME_EXACT, &temp); |
kern_gettime(&temp); |
if (alarm_timer == -1) |
returnvalue.tv_sec = 0; |
1122,7 → 1102,7 |
else { |
SUBTIMESPEC(&alarm_time, &temp, &returnvalue); |
event_delete(alarm_timer); |
kern_event_delete(alarm_timer); |
} |
if (seconds) { |
1496,14 → 1476,14 |
if (proc_table[i].status == WAIT_SIG) { |
if (proc_table[i].delay_timer != -1) { |
event_delete(proc_table[i].delay_timer); |
kern_event_delete(proc_table[i].delay_timer); |
proc_table[i].delay_timer = -1; |
} |
q_extract(i, &sigwaiters); |
iq_extract(i, &sigwaiters); |
l = proc_table[i].task_level; |
level_table[l]->task_insert(l,i); |
level_table[l]->public_unblock(l,i); |
return 1; |
} |
1510,7 → 1490,7 |
else if (proc_table[i].status == WAIT_SIGSUSPEND) { |
l = proc_table[i].task_level; |
level_table[l]->task_insert(l,i); |
level_table[l]->public_unblock(l,i); |
return 1; |
} |
1544,7 → 1524,7 |
procsigpending = 0; |
sigwaiters = NIL; |
iq_init(&sigwaiters, &freedesc, 0); |
alarm_timer = -1; |
/* Interrupt handling init */ |
/shark/tags/rel_0_3/kernel/event.c |
---|
18,11 → 18,11 |
/** |
------------ |
CVS : $Id: event.c,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: event.c,v 1.2 2003-01-07 17:07:49 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:07:49 $ |
------------ |
This file contains the functions to be used into events: |
94,7 → 94,7 |
/* we have to change context ONLY IF the sys_end primitive was not called |
before (e.g., in a exception raise called by a task_epilogue called by |
the scheduler. */ |
if (mustexit == 0) { |
if (!mustexit) { |
scheduler(); |
ll_context_to(proc_table[exec_shadow].context); |
} |
111,7 → 111,7 |
extern void perftest_epilogue(void); |
if (perftime_count < 10000) { |
perftime_prol[perftime_count] = ll_gettime(TIME_EXACT, NULL); |
perftime_prol[perftime_count] = kern_gettime(NULL); |
} |
event_setepilogue(perftest_epilogue); |
#else |
128,7 → 128,7 |
reschedule(); |
if (perftime_count < 10000){ |
perftime_epil[perftime_count] = ll_gettime(TIME_EXACT, NULL); |
perftime_epil[perftime_count] = kern_gettime(NULL); |
perftime_count++; |
} |
} |
140,7 → 140,7 |
if (perftime_count < 10000) { |
perftime_epil[perftime_count] = ll_gettime(TIME_EXACT, NULL); |
perftime_epil[perftime_count] = kern_gettime(NULL); |
perftime_count++; |
} |
} |
/shark/tags/rel_0_3/kernel/makefile |
---|
16,11 → 16,8 |
blkact.o \ |
cancel.o \ |
conditio.o \ |
delay.o \ |
endcycle.o \ |
event.o \ |
exchtxt.o \ |
exchgrx.o \ |
exchand.o \ |
grpcreat.o \ |
jet.o \ |
join.o \ |
37,20 → 34,16 |
printk.o \ |
perror.o \ |
pthread.o \ |
qqueue.o \ |
queue.o \ |
iqueue.o \ |
signal.o \ |
sleep.o \ |
status.o \ |
time.o \ |
tpreempt.o \ |
trace.o |
# create.o # look at kernel/create.c and kernel/grpcreat.c for more info |
trace.o \ |
tskmsg.o |
include $(BASE)/config/lib.mk |
install all clean cleanall depend:: |
make -C init $@ |
make -C modules $@ |
make -C mem $@ |
/shark/tags/rel_0_3/kernel/iqueue.c |
---|
0,0 → 1,221 |
/* |
* Project: S.Ha.R.K. |
* |
* Coordinators: |
* Giorgio Buttazzo <giorgio@sssup.it> |
* Paolo Gai <pj@gandalf.sssup.it> |
* |
* Authors : |
* Paolo Gai <pj@gandalf.sssup.it> |
* (see the web pages for full authors list) |
* |
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy) |
* |
* http://www.sssup.it |
* http://retis.sssup.it |
* http://shark.sssup.it |
*/ |
/* |
------------ |
CVS : $Id: iqueue.c,v 1.1 2002-11-11 08:34:08 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1 $ |
Last update: $Date: 2002-11-11 08:34:08 $ |
------------ |
*/ |
/* |
* Copyright (C) 2002 Paolo Gai |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
* |
*/ |
#include <kernel/iqueue.h> |
#include <kernel/mem.h> |
/*
 * Initializes the index queue q.
 *
 * q     - the queue to initialize (set empty)
 * share - if non-NULL, q shares the next/prev/priority arrays of an
 *         already-initialized queue; otherwise a fresh shared part is
 *         allocated with kern_alloc()
 * flags - IQUEUE_NO_PRIORITY / IQUEUE_NO_TIMESPEC suppress allocation
 *         of the corresponding priority arrays
 *
 * Fix: when an array allocation is suppressed by the flags, the
 * corresponding pointer is now explicitly set to NULL instead of being
 * left uninitialized (kern_alloc does not zero the shared structure),
 * so a stray use fails loudly instead of corrupting random memory.
 *
 * NOTE(review): kern_alloc results are not checked here, matching the
 * original code — presumably allocation failure at init time is fatal
 * elsewhere; confirm against the kernel memory policy.
 */
void iq_init (IQUEUE *q, IQUEUE *share, int flags)
{
  q->first = NIL;
  q->last = NIL;
  if (share)
    q->s = share->s;
  else {
    q->s = (struct IQUEUE_shared *)kern_alloc(sizeof(struct IQUEUE_shared));

    if (!(flags & IQUEUE_NO_PRIORITY))
      q->s->priority = (DWORD *)kern_alloc(sizeof(DWORD) * MAX_PROC);
    else
      q->s->priority = NULL;

    if (!(flags & IQUEUE_NO_TIMESPEC))
      q->s->timespec_priority = (struct timespec *)
        kern_alloc(sizeof(struct timespec) * MAX_PROC);
    else
      q->s->timespec_priority = NULL;
  }
}
/*+ |
This function insert the task with PID i in the queue que. |
The insertion is made respecting the priority field. |
(the first item in the queue have the less priority) |
+*/ |
void iq_priority_insert (PID i, IQUEUE *que) |
{ |
DWORD prio; |
PID p,q; |
p = NIL; |
q = que->first; |
prio = que->s->priority[i]; |
while ((q != NIL) && (prio >= que->s->priority[q])) { |
p = q; |
q = que->s->next[q]; |
} |
if (p != NIL) |
que->s->next[p] = i; |
else |
que->first = i; |
if (q != NIL) |
que->s->prev[q] = i; |
else |
que->last = i; |
que->s->next[i] = q; |
que->s->prev[i] = p; |
} |
/* |
This function insert the task with PID i in the queue que. |
The insertion is made respecting the timespec priority field. |
(the first item in the queue have the less priority) |
*/ |
void iq_timespec_insert(PID i, IQUEUE *que) |
{ |
struct timespec prio; |
PID p,q; |
p = NIL; |
q = que->first; |
TIMESPEC_ASSIGN(&prio, &que->s->timespec_priority[i]); |
while ((q != NIL) && |
!TIMESPEC_A_LT_B(&prio, &que->s->timespec_priority[q])) { |
p = q; |
q = que->s->next[q]; |
} |
if (p != NIL) |
que->s->next[p] = i; |
else |
que->first = i; |
if (q != NIL) |
que->s->prev[q] = i; |
else |
que->last = i; |
que->s->next[i] = q; |
que->s->prev[i] = p; |
} |
/* Pushes PID p at the head of queue q. */
void iq_insertfirst(PID p, IQUEUE *q)
{
  PID oldhead = q->first;

  /* p becomes the new head; its next is the old head (or NIL) */
  q->s->prev[p] = NIL;
  q->s->next[p] = oldhead;

  if (oldhead == NIL)
    q->last = p;            /* queue was empty: p is also the tail */
  else
    q->s->prev[oldhead] = p;

  q->first = p;
}
/* Appends PID p at the tail of queue q. */
void iq_insertlast(PID p, IQUEUE *q)
{
  PID oldtail = q->last;

  /* p becomes the new tail; its prev is the old tail (or NIL) */
  q->s->next[p] = NIL;
  q->s->prev[p] = oldtail;

  if (oldtail == NIL)
    q->first = p;           /* queue was empty: p is also the head */
  else
    q->s->next[oldtail] = p;

  q->last = p;
}
/* Unlinks PID i from queue que (i must currently be in the queue). */
void iq_extract(PID i, IQUEUE *que)
{
  PID before = que->s->prev[i];
  PID after  = que->s->next[i];

  if (before == NIL)
    que->first = after;          /* i was the head */
  else
    que->s->next[before] = after;

  if (after == NIL)
    que->last = before;          /* i was the tail */
  else
    que->s->prev[after] = before;
}
/* Pops and returns the head of queue q, or NIL if the queue is empty. */
PID iq_getfirst(IQUEUE *q)
{
  PID head = q->first;

  if (head != NIL) {
    PID newhead = q->s->next[head];

    q->first = newhead;
    if (newhead == NIL)
      q->last = NIL;             /* queue is now empty */
    else
      q->s->prev[newhead] = NIL;
  }

  return head;
}
/* Pops and returns the tail of queue q, or NIL if the queue is empty. */
PID iq_getlast(IQUEUE *q)
{
  PID tail = q->last;

  if (tail != NIL) {
    PID newtail = q->s->prev[tail];

    q->last = newtail;
    if (newtail == NIL)
      q->first = NIL;            /* queue is now empty */
    else
      q->s->next[newtail] = NIL;
  }

  return tail;
}
/shark/tags/rel_0_3/kernel/blkact.c |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: blkact.c,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: blkact.c,v 1.2 2002-10-28 07:58:19 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2002-10-28 07:58:19 $ |
------------ |
block_activations & co. |
78,11 → 78,11 |
/* some controls on the task p */ |
if (p<0 || p>=MAX_PROC) { |
errno = EUNVALID_TASK_ID; |
errno = EINVALID_TASK_ID; |
return -1; |
} |
if (proc_table[p].status == FREE) { |
errno = EUNVALID_TASK_ID; |
errno = EINVALID_TASK_ID; |
return -1; |
} |
107,11 → 107,11 |
/* some controls on the task p */ |
if (p<0 || p>=MAX_PROC) { |
errno = EUNVALID_TASK_ID; |
errno = EINVALID_TASK_ID; |
return -1; |
} |
if (proc_table[p].status == FREE) { |
errno = EUNVALID_TASK_ID; |
errno = EINVALID_TASK_ID; |
return -1; |
} |
/shark/tags/rel_0_3/kernel/printk.c |
---|
36,11 → 36,11 |
*/ |
/* |
* CVS : $Id: printk.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $ |
* CVS : $Id: printk.c,v 1.2 2002-10-28 07:56:49 pj Exp $ |
* |
* File: $File$ |
* Revision: $Revision: 1.1.1.1 $ |
* Last update: $Date: 2002-03-29 14:12:52 $ |
* Revision: $Revision: 1.2 $ |
* Last update: $Date: 2002-10-28 07:56:49 $ |
*/ |
#include <ll/i386/cons.h> |
84,6 → 84,7 |
f=kern_fsave(); |
cprintf("[%s] %s",levelname[level],buf); |
/* if we called printk, and the string does not have a \n in it, add it */ |
if ((!flag)&&(!result)) cprintf("\n"); |
kern_frestore(f); |
/shark/tags/rel_0_3/include/kernel/func.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: func.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: func.h,v 1.3 2003-01-07 17:12:19 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:12:19 $ |
------------ |
Kernel functions: |
89,15 → 89,6 |
/* if a source use printk() it should include log.h not func.h */ |
#include <kernel/log.h> |
#if 0 |
#ifdef __DEBUG_ON__ |
#define printk(fmt,args...) \ |
VM_printf(fmt,##args) |
#else |
#define printk(fmt,args...) |
#endif |
#endif |
/*---------------------------------------------------------------------*/ |
/* Kernel global functions: initialization & termination... */ |
/*---------------------------------------------------------------------*/ |
107,11 → 98,39 |
TIME __kernel_register_levels__(void *arg); |
/*+ This function returns a level_des **. the value returned shall be |
used to register a level module. The function shall be called only at |
module registration time. It assume that the system is not yet |
initialized, so we shall not call sys_abort... +*/ |
LEVEL level_alloc_descriptor(); |
used to register a level module. |
The function is usually called at module registration time. The |
function can also be called when the system is already started, to |
allow the implementation of dynamic module registration. |
The argument must be the size of the data block that have to be allocated |
The function returns the number of the descriptor allocated for the module |
or -1 in case there are no free descriptors. |
The function also reserves a descriptor with size s, initialized |
with default function pointers. |
+*/ |
LEVEL level_alloc_descriptor(size_t s); |
/*+ This function release a level descriptor previously allocated using |
level_alloc_descriptor(). |
The function returns 0 if the level has been freed, or -1 if someone is |
using it, -2 if the level has never been registered. |
+*/ |
int level_free_descriptor(LEVEL l); |
/* Call this if you want to say that your module is using module l |
(e.g., for calling its private functions) */ |
int level_use_descriptor(LEVEL l); |
/* Call this when you no more need the module l */ |
int level_unuse_descriptor(LEVEL l); |
/*+ This function returns a resource_des **. the value returned shall be |
used to register a resource module. The function shall be called only at |
module registration time. It assume that the system is not yet |
137,15 → 156,8 |
(when uses some defines contained in const.h) +*/ |
int sys_atrunlevel(void (*func_code)(void *),void *parm, BYTE when); |
/*+ These functions can be used to set a nice (I hope) default |
signal handler. If not used, they are not linked. |
They returns -1 if an error occurred, as done by the |
sigaction primitive. +*/ |
int set_exchandler_grx(); |
int set_exchandler_text(); |
/*---------------------------------------------------------------------*/ |
/* Kernel global functions: scheduler, queues */ |
/* Kernel global functions: scheduler, */ |
/*---------------------------------------------------------------------*/ |
/*+ This is the generic scheduler. |
157,31 → 169,13 |
the end of an event list +*/ |
void event_need_reschedule(); |
/* Simple QUEUE management functions */ |
void q_insert (PID p, QUEUE *q); |
void q_timespec_insert (PID p, QUEUE *q); |
void q_extract (PID p, QUEUE *q); |
PID q_getfirst ( QUEUE *q); |
void q_insertfirst (PID p, QUEUE *q); |
/* QQUEUE management functions */ |
void qq_init ( QQUEUE *q); |
void qq_insert (PID p, QQUEUE *q); |
void qq_timespec_insert (PID p, QQUEUE *q); |
void qq_extract (PID p, QQUEUE *q); |
PID qq_getfirst ( QQUEUE *q); |
void qq_insertfirst (PID p, QQUEUE *q); |
void qq_insertlast (PID p, QQUEUE *q); |
PID qq_queryfirst ( QQUEUE *q); |
PID qq_querylast ( QQUEUE *q); |
void task_makefree(void *ret); |
void check_killed_async(void); |
int guarantee(); |
void levels_init(void); /* see init.c */ |
void runlevel_init(); |
void call_runlevel_func(int runlevel, int aborting); |
221,15 → 215,6 |
return ll_context_from(); |
} |
#ifdef __TEST1__ |
extern int useds; |
extern int testactive; |
extern struct timespec s_send[]; |
#endif |
/*+ this functions are called every time a context is changed +*/ |
void kern_after_dispatch(void); |
243,10 → 228,6 |
{ |
ll_context_to(c); |
kern_after_dispatch(); |
#ifdef __TEST1__ |
if (testactive) ll_gettime(TIME_EXACT,&s_send[useds-1] ); |
#endif |
sti(); |
} |
276,6 → 257,8 |
return e; |
} |
#define kern_event_delete event_delete |
/*+ the default capacity timer used by the kernel... +*/ |
void capacity_timer(void *arg); |
282,17 → 265,17 |
#define kern_printf message |
extern __inline__ TIME kern_gettime(struct timespec *t) |
{ |
return ll_gettime(TIME_EXACT, t); |
} |
/*---------------------------------------------------------------------*/ |
/* Kernel global functions: IRQ, errors and exception handling */ |
/* Kernel global functions: IRQ handling */ |
/*---------------------------------------------------------------------*/ |
/*+ Generic exception trapping routine +*/ |
void act_exc(int code); |
/* |
* User level interrupt/exception primitives |
*/ |
/*+ Interrupt handler installation +*/ |
int handler_set(int no, void (*fast)(int), PID pi); |
330,18 → 313,6 |
/*+ prints an error message (see perror.c) +*/ |
void perror (const char *s); |
/*+ Convert a status in a string. Useful for sys_status and level_status +*/ |
char *status_to_a(WORD status); |
/*+ this primitive prints the status of the system. cw contains a set of |
the statuses to be prompted... see const.h +*/ |
void sys_status(DWORD cw); |
/*+ sys_status flags +*/ |
#define CLOCK_STATUS 1 |
#define SCHED_STATUS 2 |
#define MEM_STATUS 4 |
/*+ this primitive returns the time read from the system timer +*/ |
TIME sys_gettime(struct timespec *t); |
387,6 → 358,34 |
void jet_update_endcycle(); |
/*---------------------------------------------------------------------*/ |
/* Internal Macros */ |
/*---------------------------------------------------------------------*/ |
extern __inline__ void kern_epilogue_macro(void) |
{ |
TIME tx; /* a dummy used for time computation */ |
struct timespec ty; /* a dummy used for time computation */ |
kern_gettime(&schedule_time); |
/* manage the capacity event */ |
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
tx = TIMESPEC2USEC(&ty); |
proc_table[exec_shadow].avail_time -= tx; |
jet_update_slice(tx); |
/* if the event didn't fire before, we delete it. */ |
if (cap_timer != NIL) { |
kern_event_delete(cap_timer); |
cap_timer = NIL; |
} |
} |
/* This function is called by the kernel into kern.c to register a default |
exception handler */ |
int set_default_exception_handler(void); |
/*---------------------------------------------------------------------*/ |
/* Task management primitives */ |
/*---------------------------------------------------------------------*/ |
529,19 → 528,18 |
/*+ Enable the preemption mechanism on the task. +*/ |
void task_preempt(void); |
/*+ sends a message to the scheduling module that is handling the task +*/ |
int task_message(void *m, int reschedule); |
/*+ This function signals to the kernel that the current istance of |
the task (periodic or aperiodic) is ended; so the task can be |
suspended until it is activated again. Pending activations may be saved |
depending on the task model +*/ |
void task_endcycle(void); |
extern __inline__ void task_endcycle(void) |
{ |
task_message(NULL, 1); |
} |
/*+ This function suspend the actual task until an explicit activation |
Pending activations are discarded +*/ |
void task_sleep(void); |
/*+ This function suspend the actual task for a minimum delay time +*/ |
void task_delay(DWORD delay); |
/*+ This primitives refers the group id which is supplied |
by the application, not by the kernel +*/ |
int group_activate(WORD g); |
/shark/tags/rel_0_3/include/kernel/model.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: model.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: model.h,v 1.2 2003-01-07 17:12:19 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:19 $ |
------------ |
This file contains the definitions of the task and resource models. |
55,8 → 55,8 |
#define __KERNEL_MODEL_H__ |
#include "ll/ll.h" |
#include "kernel/types.h" |
/* ----------------------------------------------------------------------- |
----------------------------------------------------------------------- |
----------------------------------------------------------------------- |
165,6 → 165,7 |
typedef struct { |
WORD pclass; |
LEVEL level; |
size_t stacksize; |
void *stackaddr; |
WORD group; |
210,13 → 211,13 |
/* Some macros to set various task-model parameters */ |
#define task_default_model(m,p) (m).pclass = (p), \ |
(m).level = 0; \ |
(m).stacksize = 4096, \ |
(m).stackaddr = NULL, \ |
(m).group = 0, \ |
(m).arg = NULL,\ |
(m).control = 0 |
#define task_def_level(m,l) (m).pclass = ((m).pclass & 0xFF00) | \ |
((l) & 0xFF) |
#define task_def_level(m,l) (m).level = (l) |
#define task_def_arg(m,a) (m).arg = (a) |
#define task_def_stack(m,s) (m).stacksize = (s) |
#define task_def_stackaddr(m,s) (m).stackaddr = (s) |
237,16 → 238,13 |
PCLASS values |
----------------------------------------------------------------------- */ |
/*+ These are the value for the pclass field; |
a level l that accept a task model with pclass p |
accept also the alias pclass (p | l) |
=> the LSByte MUST be 0 (256 levels maximum) +*/ |
/* These are the value for the pclass field */ |
#define DUMMY_PCLASS 0x0000 |
#define HARD_PCLASS 0x0100 |
#define SOFT_PCLASS 0x0200 |
#define NRT_PCLASS 0x0300 |
#define JOB_PCLASS 0x0400 |
#define DUMMY_PCLASS 0 |
#define HARD_PCLASS 1 |
#define SOFT_PCLASS 2 |
#define NRT_PCLASS 3 |
#define JOB_PCLASS 4 |
/* ----------------------------------------------------------------------- |
559,11 → 557,12 |
+*/ |
typedef struct { |
int rclass; // protocollo a cui si riferisce il modello di task |
int rclass; /* protocol */ |
RLEVEL level; /* level */ |
} RES_MODEL; |
#define res_default_model(r, p) (r).rclass = (p) |
#define res_def_level(r,l) (r).rclass = ((r).rclass & 0xFF00) | (l) |
#define res_default_model(r, p) (r).rclass = (p), (r).level = 0 |
#define res_def_level(r,l) (r).level = (l) |
/shark/tags/rel_0_3/include/kernel/descr.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: descr.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: descr.h,v 1.3 2003-01-07 17:12:19 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:12:19 $ |
------------ |
Kernel main data structures |
70,6 → 70,7 |
#include <ll/ll.h> |
#include <kernel/model.h> |
#include <kernel/types.h> |
#include <kernel/iqueue.h> |
#include <limits.h> |
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ |
129,8 → 130,6 |
int sigpending; /*+ The signal pending mask +*/ |
int sigwaiting; /*+ The signal waiting mask +*/ |
struct timespec request_time; |
/*+ Last request time for the task +*/ |
int avail_time; /*+ the time the task can execute before a |
timer fire. see also the control field |
and bits related in model.h +*/ |
140,8 → 139,8 |
struct _task_handler_rec *cleanup_stack; |
/*+ The cleanup stack +*/ |
QUEUE next,prev; /*+ Next/Prev Index in the queue +*/ |
int errnumber; |
/* Job Execution Time fields */ |
174,8 → 173,7 |
* the generic kernel, with exclusion of delay_timer that is used |
* also in cond_timedwait |
*/ |
DWORD priority; /*+ A priority field +*/ |
struct timespec timespec_priority; /*+ Another priority field +*/ |
int delay_timer; /*+ A field useful to store the delay timer +*/ |
int wcet; /*+ a worst case time execution +*/ |
193,139 → 191,107 |
All the informations that depends on the particular module are put |
in the level module files. |
The initialization of a level is splitted in two parts: |
- the registration -> called before the system initialization, typically |
AFTER the resource registration |
- the level_init -> called during the system initialization, |
BEFORE the resource_init(s) |
Here a small description of the various functions: |
------------------------------------------------------------------- |
- PUBLIC Functions: |
on one side, a module should export an interface to the Generic |
Kernel, giving a set of functions that the Generic Kernel can use |
to ask a service to the module. That is, the Public Functions are |
called ONLY by the Generic Kernel. |
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/ |
- PRIVATE Functions: on the other side, a module can export an |
interface to the public part of the same or of another |
module. That is, Private Functions are called ONLY by Public and |
Private Functions. |
------------------------------------------------------------------- |
typedef struct { |
char level_name[MAX_LEVELNAME]; /*+ for statistical pourposes +*/ |
WORD level_code; /*+ level identification code +*/ |
BYTE level_version; /*+ level version +*/ |
int (*private_insert )(LEVEL l, PID p, TASK_MODEL *m); |
Inserts a task into the internal module data structure. |
void (*private_extract )(LEVEL l, PID p); |
Removes a task from the internal module data structure. |
/* LEVEL CALLS */ |
int (*level_accept_task_model)(LEVEL l, TASK_MODEL *m); |
/*+ models that a task can manage. returns |
0 if the level can manage the model, |
-1 if not +*/ |
int (*private_eligible)(LEVEL l, PID p); |
A task inserted into the internal module data structure needs to be |
scheduled. returns 0 if it can be scheduled, -1 if not. |
int (*level_accept_guest_model)(LEVEL l, TASK_MODEL *m); |
/*+ models that a task can manage as guest |
tasks. returns |
0 if the level can manage the model, |
-1 if not +*/ |
void (*private_dispatch)(LEVEL l, PID p, int nostop); |
A task inserted into the internal module data structure has been dispatched. |
// void (*level_init)(); /*+ initialization of the level module +*/ |
// void (*level_end)(); /*+ level termination (at system end... +*/ |
void (*level_status)(LEVEL l);/*+ print level statistics... +*/ |
void (*private_epilogue)(LEVEL l, PID p); |
A task inserted into the internal module data structure has been preempted. |
PID (*level_scheduler)(LEVEL l); |
/*+ the level scheduler returns a task |
chosen among those belonging to the |
level +*/ |
int (*level_guarantee)(LEVEL l, bandwidth_t *freebandwidth); |
/*+ 0 if the level is guaranteed, -1 if not |
no guarantee if (*f)()=null |
the function updates the parameter |
(see guarantee() ) +*/ |
/* TASK CALLS */ |
int (*task_create)(LEVEL l, PID p, TASK_MODEL *m); |
/*+ the task p is created into the level |
returns 0->ok, -1->error +*/ |
void (*task_detach)(LEVEL l, PID p); |
/*+ there is an error in the task_create |
after the task call task_create. |
The function delete all the informations |
about the task in the level. |
For the resources levels there is the |
res_detach: res_detach is called also |
when killing a task +*/ |
int (*task_eligible)(LEVEL l, PID p); |
/*+ correctness control when a task is |
chosen by a level scheduler (used with |
aperiodic servers) 0->ok, -1->no +*/ |
void (*task_dispatch)(LEVEL l, PID p, int nostop); |
/*+ a task go in the EXEC status (called |
by dispatch() ) +*/ |
void (*task_epilogue)(LEVEL l, PID p); |
/*+ a task has finished the current slice+*/ |
PID (*public_scheduler)(LEVEL l); |
returns a task to schedule, or -1 if no tasks are ready |
void (*task_activate)(LEVEL l, PID p); |
/*+ the task is activated... +*/ |
int (*public_guarantee)(LEVEL l, bandwidth_t *freebandwidth); |
returns 0 if the level is guaranteed, -1 if not |
no guarantee if (*f)()=null |
the function updates the parameter freebandwidth (see guarantee() ) |
int (*public_create )(LEVEL l, PID p, TASK_MODEL *m); |
the task p is created into the module |
returns 0->ok, -1->error |
void (*task_insert)(LEVEL l, PID p); |
/*+ opposite to task_extract +*/ |
void (*task_extract)(LEVEL l, PID p); |
/*+ remove the task from the "ready" (if any) |
queue +*/ |
void (*public_detach )(LEVEL l, PID p); |
there is an error in the public_create. The function removes all the |
informations about the task in the module. |
void (*task_endcycle)(LEVEL l, PID p); |
/*+ the (periodic) task finish the cycle +*/ |
void (*task_end)(LEVEL l, PID p); |
/*+ the task is killed; we have to remove |
it from the level queues, test if it |
is in the exec state, etc... it can |
modify the state of the task (-> FREE, |
ZOMBIE...), but |
cannot call the scheduler directly (it |
is called by the task_makefree. |
Note: the task can be in a state |
different from those managed by the |
level because the task may be blocked. |
the res_detach is in any case called |
AFTER the task_end. +*/ |
void (*public_end )(LEVEL l, PID p); |
the task has been killed, or it ended regularly |
void (*task_sleep)(LEVEL l, PID p); |
/*+ this function will fall asleep the |
task in the EXE state. +*/ |
int (*public_eligible )(LEVEL l, PID p); |
A task needs to be scheduled. returns 0 if it can be scheduled, -1 if not. |
void (*public_dispatch )(LEVEL l, PID p, int nostop); |
A task has been dispatched. |
void (*public_epilogue )(LEVEL l, PID p); |
A task has been preempted (or its capacity is exausted). |
void (*task_delay)(LEVEL l, PID p,DWORD tickdelay); |
void (*public_activate )(LEVEL l, PID p); |
A task has been activated. |
void (*public_unblock )(LEVEL l, PID p); |
void (*public_block )(LEVEL l, PID p); |
A task has been unblocked/blocked on a synchronization point |
(e.g. a semaphore, a mailbox, a nanosleep). |
/* guest CALLS: |
these functions are called from an Aperiodic Server Level for the task |
that are inserted in the local queues */ |
int (*guest_create)(LEVEL l, PID p, TASK_MODEL *m); |
/*+ the task is already created in another |
level and it is inserted in the current |
level; returns 0->ok, -1->error +*/ |
void (*guest_detach)(LEVEL l, PID p); |
/*+ there is an error in a task creation |
of a task made by an aperiodic server |
The function delete all the informations |
about the task in the level. +*/ |
void (*guest_dispatch)(LEVEL l, PID p, int nostop); |
/*+ a task belonging to another level but |
inserted in the current level go in the |
EXEC status (called by dispatch() ) +*/ |
void (*guest_epilogue)(LEVEL l, PID p); |
/*+ a task has finished the current slice+*/ |
int (*public_message )(LEVEL l, PID p, void *m); |
A task sent a message m to the module. |
void (*guest_activate)(LEVEL l, PID p); |
/*+ the task is activated... +*/ |
If the message has value NULL the |
behavior should be the task_endcycle primitive behavior. |
void (*guest_insert)(LEVEL l, PID p); |
/*+ remove the task from the "ready" (if any) |
queue +*/ |
void (*guest_extract)(LEVEL l, PID p); |
/*+ opposite to guest_insert +*/ |
The function returns an integer to the user. |
If you want to avoid the call to public_epilogue, after public_message, |
just write exec = exec_shadow = -1; in your public_message code. |
void (*guest_endcycle)(LEVEL l, PID p); |
/*+ the task finish the cycle +*/ |
void (*guest_end)(LEVEL l, PID p); |
/*+ the task is killed +*/ |
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/ |
void (*guest_sleep)(LEVEL l, PID p); |
void (*guest_delay)(LEVEL l, PID p, TIME tickdelay); |
typedef struct { |
void (*private_insert )(LEVEL l, PID p, TASK_MODEL *m); |
void (*private_extract )(LEVEL l, PID p); |
int (*private_eligible)(LEVEL l, PID p); |
void (*private_dispatch)(LEVEL l, PID p, int nostop); |
void (*private_epilogue)(LEVEL l, PID p); |
PID (*public_scheduler)(LEVEL l); |
int (*public_guarantee)(LEVEL l, bandwidth_t *freebandwidth); |
int (*public_create )(LEVEL l, PID p, TASK_MODEL *m); |
void (*public_detach )(LEVEL l, PID p); |
void (*public_end )(LEVEL l, PID p); |
int (*public_eligible )(LEVEL l, PID p); |
void (*public_dispatch )(LEVEL l, PID p, int nostop); |
void (*public_epilogue )(LEVEL l, PID p); |
void (*public_activate )(LEVEL l, PID p); |
void (*public_unblock )(LEVEL l, PID p); |
void (*public_block )(LEVEL l, PID p); |
int (*public_message )(LEVEL l, PID p, void *m); |
} level_des; |
350,27 → 316,18 |
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/ |
typedef struct { |
char res_name[MAX_MODULENAME];/*+ for statistical pourposes +*/ |
WORD res_code; /*+ resource module identification code +*/ |
BYTE res_version; /*+ resource module version +*/ |
int rtype; /*+ resource module extented interface |
code (see model.h) +*/ |
void (*resource_status)(); /*+ print resource protocol statistics...+*/ |
int (*res_register)(RLEVEL l, PID p, RES_MODEL *r); |
/*+ When the system knows that a |
resource model can be registered |
by a level, it calls this |
function. It registers all the |
information about the task and the |
model. returns 0 if the model |
can be handled, -1 otherwise+*/ |
int (*level_accept_resource_model)(RLEVEL l, RES_MODEL *r); |
/*+ this function is called when the process |
is created. it returns 0 if the RES_MODEL |
can be managed by the level,-1 if not+*/ |
void (*res_register)(RLEVEL l, PID p, RES_MODEL *r); |
/*+ When the system knows that a resource |
model can be registered by a level, |
it calls this function. It registers all |
the information about the task and the |
model. +*/ |
void (*res_detach)(RLEVEL l, PID p); |
/*+ this function is called when the task |
is killed or some error is occurred |
417,13 → 374,10 |
typedef struct { |
resource_des r; |
int (*level_accept_mutexattr)(RLEVEL l, const mutexattr_t *a); |
/*+ this function is called when a mutex |
is created. it returns 0 if the |
mutexattr_t |
can be managed by the level,-1 if not+*/ |
int (*init) (RLEVEL l, mutex_t *m, const mutexattr_t *a); |
/*+ this function is called when a mutex is created. it returns |
>=0 if the mutexattr_t can be managed by the level (=0 Ok, an |
error otherwise), -1 otherwise +*/ |
int (*destroy)(RLEVEL l, mutex_t *m); |
int (*lock) (RLEVEL l, mutex_t *m); |
int (*trylock)(RLEVEL l, mutex_t *m); |
438,7 → 392,7 |
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/ |
typedef struct condition_struct { |
QUEUE waiters; /*+ queue for tasks waiting on the condition +*/ |
IQUEUE waiters; /*+ queue for tasks waiting on the condition +*/ |
mutex_t *used_for_waiting; |
} cond_t; |
/shark/tags/rel_0_3/include/kernel/const.h |
---|
16,11 → 16,11 |
/** |
------------ |
CVS : $Id: const.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: const.h,v 1.2 2003-01-07 17:12:19 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:19 $ |
------------ |
System constants: |
156,17 → 156,13 |
/*+ sys_atrunlevel status bit values: used to signal that the function |
has to be called +*/ |
#define RUNLEVEL_STARTUP 0 /*+ startup in real mode +*/ |
#define RUNLEVEL_INIT 1 /*+ init time +*/ |
#define RUNLEVEL_SHUTDOWN 2 /*+ shutting down the system +*/ |
#define RUNLEVEL_BEFORE_EXIT 3 /*+ before the kernel has been terminated +*/ |
#define RUNLEVEL_AFTER_EXIT 4 /*+ after the kernel has been terminated +*/ |
#define RUNLEVEL_RUNNING 2 /*+ system is running!!! +*/ |
#define RUNLEVEL_SHUTDOWN 3 /*+ shutting down the system +*/ |
#define RUNLEVEL_BEFORE_EXIT 4 /*+ before the kernel has been terminated +*/ |
#define RUNLEVEL_AFTER_EXIT 5 /*+ after the kernel has been terminated +*/ |
#define NO_AT_ABORT 8 /*+ only when sys_end is called +*/ |
/* |
#define EXC_CLEAR 0 |
#define EXC_INSTALLED 1 |
#define EXC_ACTIVE 2 |
#define EXC_MASKED 4 |
*/ |
#endif /* __CONST_H__ */ |
/shark/tags/rel_0_3/include/kernel/var.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: var.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: var.h,v 1.3 2003-01-07 17:12:19 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:12:19 $ |
------------ |
Kernel global variables |
63,7 → 63,17 |
/*---------------------------------------------------------------------*/ |
extern proc_des proc_table[]; /*+ Process descriptor table +*/ |
extern level_des *level_table[]; /*+ Level descriptor table +*/ |
/* for a description of the following fields, look in kernel/kern.c */ |
extern size_t level_size[]; |
extern int level_used[]; |
extern int level_first; |
extern int level_last; |
extern int level_free; |
extern int level_next[]; |
extern int level_prev[]; |
extern resource_des *resource_table[]; /*+ Resource descriptor table +*/ |
70,7 → 80,7 |
extern PID exec; /*+ task suggested by the scheduler +*/ |
extern PID exec_shadow; /*+ task really executed +*/ |
extern QUEUE freedesc; /*+ Free descriptor handled as a queue +*/ |
extern IQUEUE freedesc; /*+ Free descriptor handled as a queue +*/ |
extern TIME sys_tick; /*+ System tick (in usec) +*/ |
extern struct timespec schedule_time; |
84,7 → 94,6 |
event is posted. Normally, it is |
equal to schedule_time +*/ |
extern DWORD sched_levels; /*+ Schedule levels active in the system +*/ |
extern DWORD res_levels; /*+ Resource levels active in the system +*/ |
extern int task_counter; /*+ Application task counter. It represent |
/shark/tags/rel_0_3/include/kernel/iqueue.h |
---|
0,0 → 1,197 |
/* |
* Project: S.Ha.R.K. |
* |
* Coordinators: |
* Giorgio Buttazzo <giorgio@sssup.it> |
* Paolo Gai <pj@gandalf.sssup.it> |
* |
* Authors : |
* Paolo Gai <pj@gandalf.sssup.it> |
* (see the web pages for full authors list) |
* |
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy) |
* |
* http://www.sssup.it |
* http://retis.sssup.it |
* http://shark.sssup.it |
*/ |
/* |
------------ |
CVS : $Id: iqueue.h,v 1.1 2002-11-11 08:36:01 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1 $ |
Last update: $Date: 2002-11-11 08:36:01 $ |
------------ |
*/ |
/* |
* Copyright (C) 2002 Paolo Gai |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
* |
*/ |
/* |
IQUEUEs |
This file contains functions that helps to manage task queues. |
These functions are different from the functions that manages the |
QUEUE and QQUEUE types. In particular, these functions no more relies |
on the prev & next fields of the task descriptor. In that way, tasks |
can be inserted in more than one queue at a time. |
Basically, an IQUEUE has an "I"nternal prev/next structure, that may |
be shared between one or more queue. Of course, the user MUST |
guarantee that the same task will not be inserted in two IQUEUEs that |
share the same prev/next buffer. |
The queue insertion is made by the following functions: |
iq_insert -> insertion based on the priority field. |
iq_timespec_insert -> same as above but use the timespec_priority field |
iq_insertfirst -> insert in the first position of the queue |
*/ |
#include <ll/ll.h> |
#include <kernel/const.h> |
#include <kernel/types.h> |
#ifndef __KERNEL_IQUEUE_H__ |
#define __KERNEL_IQUEUE_H__ |
#define IQUEUE_NO_PRIORITY 1 |
#define IQUEUE_NO_TIMESPEC 2 |
struct IQUEUE_shared { |
PID prev[MAX_PROC]; |
PID next[MAX_PROC]; |
struct timespec *timespec_priority; |
DWORD *priority; |
}; |
typedef struct { |
PID first; |
PID last; |
struct IQUEUE_shared *s; |
} IQUEUE; |
/* Internal queue initialization: |
share = &x -> the internal data structure of the IQUEUE x is used |
to enqueue the tasks. |
share = NULL -> an internal data structure to handle prev/next |
pairs is dynamically allocated (The amount of |
memory that is allocated can be reduced using the |
flags). |
flags can be used to reduce the memory usage of an IQUEUE when share=NULL: |
IQUEUE_NO_PRIORITY -> the iqueue do not provide internally a priority field |
IQUEUE_NO_TIMESPEC -> the iqueue do not provide internally a timespec field |
- note that, if these flags are used, the corresponding insert |
functions will not work! |
- the default value for the flags is, of course, 0 |
*/ |
void iq_init (IQUEUE *q, IQUEUE *share, int flags); |
/* Queue insert functions: |
- inserts a p into the q. p must not be already inserted into q. |
- four versions of the function; |
- iq_priority_insert -> ordered insertion using the priority field |
- iq_timespec_insert -> ordered insertion using the timespec field |
- iq_insertfirst -> insert at the first position of the queue |
- iq_insertlast -> insert at the last position of the queue |
*/ |
void iq_priority_insert (PID p, IQUEUE *q); |
void iq_timespec_insert (PID p, IQUEUE *q); |
void iq_insertfirst (PID p, IQUEUE *q); |
void iq_insertlast (PID p, IQUEUE *q); |
/* Queue extract functions: |
- extracts a task p from the queue q. |
- three versions of the function; |
- iq_extract -> extracts given a task p |
(that must be inserted in the queue) |
- iq_getfirst -> extracts the first task in the queue, |
NIL if the queue is empty |
- iq_getlast -> extracts the last task in the queue, |
NIL if the queue is empty |
*/ |
void iq_extract (PID p, IQUEUE *q); |
PID iq_getfirst ( IQUEUE *q); |
PID iq_getlast ( IQUEUE *q); |
/* Queue query functions: |
The first two functions return the first and the last task in the queue, |
NIL if the queue is empty. |
The second two functions can be used to get/set the priority or the |
timespec field used when queuing. |
*/ |
static __inline__ PID iq_query_first(IQUEUE *q) |
{ |
return q->first; |
} |
static __inline__ PID iq_query_last(IQUEUE *q) |
{ |
return q->last; |
} |
static __inline__ struct timespec *iq_query_timespec(PID p, IQUEUE *q) |
{ |
return &q->s->timespec_priority[p]; |
} |
static __inline__ DWORD *iq_query_priority (PID p, IQUEUE *q) |
{ |
return &q->s->priority[p]; |
} |
/* Queue iterators */ |
/* sometimes it is useful to go through the list. For that reason |
You can use the following two functions... */ |
static __inline__ PID iq_query_next (PID p, IQUEUE *q) |
{ |
return q->s->next[p]; |
} |
static __inline__ PID iq_query_prev (PID p, IQUEUE *q) |
{ |
return q->s->prev[p]; |
} |
/* Queue test functions */ |
static __inline__ int iq_isempty (IQUEUE *q) |
{ |
return q->first == NIL; |
} |
#endif |
/shark/tags/rel_0_3/include/kernel/int_sem.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: int_sem.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: int_sem.h,v 1.2 2002-11-11 08:36:01 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2002-11-11 08:36:01 $ |
------------ |
Internal semaphores. |
63,12 → 63,13 |
#define __INT_SEM_H__ |
#include <kernel/types.h> |
#include <kernel/iqueue.h> |
/* this is the structure normally pointed by the opt field in the |
mutex_t structure */ |
typedef struct { |
int count; |
QQUEUE blocked; |
IQUEUE blocked; |
} internal_sem_t; |
/shark/tags/rel_0_3/include/kernel/kern.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: kern.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: kern.h,v 1.2 2002-11-11 08:36:01 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2002-11-11 08:36:01 $ |
------------ |
Main kernel include file. |
67,6 → 67,8 |
//#include <kernel/err.h> |
//#include <kernel/exc.h> |
#include <kernel/var.h> |
#include <kernel/iqueue.h> |
#include <kernel/func.h> |
/shark/tags/rel_0_3/include/kernel/types.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: types.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: types.h,v 1.2 2002-11-11 08:36:01 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2002-11-11 08:36:01 $ |
------------ |
**/ |
49,27 → 49,9 |
* |
*/ |
/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ |
HARTIK SYSTEM TYPES |
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */ |
#ifndef __KERNEL_TYPES_H__ |
#define __KERNEL_TYPES_H__ |
/*+ Used to manage task queues +*/ |
typedef int QUEUE; |
/*+ Used to manage task queues with tail +*/ |
typedef struct { |
int first; /*+ first element of a task queue, NIL if empty +*/ |
int last; /*+ last element of a task qqueue, NIL if empty +*/ |
} QQUEUE; |
/*+ Used to manage mutex queues +*/ |
//typedef int MQUEUE; |
#define TASK void * |
/*+ ... a task index +*/ |
/shark/tags/rel_0_3/include/kernel/config.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: config.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: config.h,v 1.2 2002-11-11 08:36:01 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2002-11-11 08:36:01 $ |
------------ |
Kernel configuration macros: |
57,28 → 57,6 |
#ifndef __KERNEL_CONFIG_H__ |
#define __KERNEL_CONFIG_H__ |
/*+ Define this if you use the CABs... +*/ |
#define __CAB__ |
/*+ Define this if you use the ports... +*/ |
#define __PORT__ |
/*+ Define this if you use the tracer... +*/ |
#define __TRACE__ |
//#undef __TRACE__ |
/*+ Define this if you want the printk messages... +*/ |
#define __DEBUG_ON__ |
#undef __DEBUG_ON__ |
/*+ checks the Memory at the kern_mem_init... +*/ |
#undef __MEM_DEBUG__ |
/*+ defined if we are compiling test1.c with init1.c +*/ |
//#define __TEST1__ |
/*+ defined if we are compiling testG.c +*/ |
//#define TESTG |
/shark/tags/rel_0_3/include/modules/codes.h |
---|
File deleted |
/shark/tags/rel_0_3/include/modules/dummy.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: dummy.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: dummy.h,v 1.2 2003-01-07 17:12:20 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:20 $ |
------------ |
This file contains the scheduling module RR (Round Robin) |
78,6 → 78,8 |
#ifndef __DUMMY_H__ |
#define __DUMMY_H__ |
#include <kernel/types.h> |
/*+ |
On upper Intel CPUs it is possible to avoid CPU power consumption |
when the system is idle issuing the hlt instruction. |
86,7 → 88,10 |
+*/ |
#define __HLT_WORKS__ |
/*+ Registration function +*/ |
void dummy_register_level(); |
/*+ Registration function |
returns the level number at which the module has been registered. |
+*/ |
LEVEL dummy_register_level(); |
#endif |
/shark/tags/rel_0_3/include/modules/nop.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: nop.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: nop.h,v 1.2 2003-01-07 17:12:20 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:20 $ |
------------ |
This file contains the No Protocol (NOP) implementation of mutexes |
79,6 → 79,8 |
#ifndef __NOP_H__ |
#define __NOP_H__ |
void NOP_register_module(void); |
#include <kernel/types.h> |
RLEVEL NOP_register_module(void); |
#endif |
/shark/tags/rel_0_3/include/modules/edf.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: edf.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: edf.h,v 1.2 2003-01-07 17:12:20 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:20 $ |
------------ |
This file contains the scheduling module EDF (Earliest Deadline First) |
123,7 → 123,6 |
#include <kernel/config.h> |
#include <sys/types.h> |
#include <kernel/types.h> |
#include <modules/codes.h> |
/*+ flags... +*/ |
137,9 → 136,12 |
/*+ Registration function: |
int flag Options to be used in this level instance... +*/ |
void EDF_register_level(int flag); |
int flag Options to be used in this level instance... |
returns the level number at which the module has been registered. |
+*/ |
LEVEL EDF_register_level(int flag); |
/*+ Returns the used bandwidth of a level +*/ |
bandwidth_t EDF_usedbandwidth(LEVEL l); |
/shark/tags/rel_0_3/include/modules/posix.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: posix.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: posix.h,v 1.2 2003-01-07 17:12:20 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:20 $ |
------------ |
This file contains the scheduling module compatible with POSIX |
96,7 → 96,6 |
#include <kernel/config.h> |
#include <sys/types.h> |
#include <kernel/types.h> |
#include <modules/codes.h> |
extern TASK __init__(void *arg); |
112,8 → 111,11 |
/*+ Registration function: |
TIME slice the slice for the Round Robin queue |
int createmain 1 if the level creates the main task 0 otherwise |
struct multiboot_info *mb used if createmain specified +*/ |
void POSIX_register_level(TIME slice, |
struct multiboot_info *mb used if createmain specified |
returns the level number at which the module has been registered. |
+*/ |
LEVEL POSIX_register_level(TIME slice, |
int createmain, |
struct multiboot_info *mb, |
int prioritylevels); |
/shark/tags/rel_0_3/include/modules/pc.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: pc.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: pc.h,v 1.2 2003-01-07 17:12:20 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:20 $ |
------------ |
This file contains the Priority Ceiling (PC) Protocol |
93,7 → 93,7 |
#include <kernel/types.h> |
#include <kernel/descr.h> |
void PC_register_module(void); |
RLEVEL PC_register_module(void); |
/*+ This function gets the ceiling of a PC mutex, and it have to be called |
only by a task that owns the mutex. |
/shark/tags/rel_0_3/include/modules/bd_edf.h |
---|
38,11 → 38,11 |
*/ |
/* |
* CVS : $Id: bd_edf.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
* CVS : $Id: bd_edf.h,v 1.2 2003-01-07 17:12:19 pj Exp $ |
* |
* File: $File$ |
* Revision: $Revision: 1.1.1.1 $ |
* Last update: $Date: 2002-03-29 14:12:51 $ |
* Revision: $Revision: 1.2 $ |
* Last update: $Date: 2003-01-07 17:12:19 $ |
*/ |
#ifndef __BD_EDF_H__ |
51,7 → 51,7 |
#include <kernel/types.h> |
#include <kernel/descr.h> |
void BD_EDF_register_module(void); |
RLEVEL BD_EDF_register_module(void); |
TIME bd_edf_getdl(void); |
/shark/tags/rel_0_3/include/modules/srp.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: srp.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: srp.h,v 1.2 2003-01-07 17:12:20 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:20 $ |
------------ |
This file contains the Stack Resource Policy (SRP) Protocol |
108,7 → 108,7 |
#include <kernel/model.h> |
#include <kernel/descr.h> |
void SRP_register_module(void); |
RLEVEL SRP_register_module(void); |
extern __inline__ RES_MODEL *SRP_usemutex(mutex_t *m) { |
return (RES_MODEL *)m->opt; |
/shark/tags/rel_0_3/include/modules/rr2.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: rr2.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: rr2.h,v 1.2 2003-01-07 17:12:20 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:20 $ |
------------ |
This file contains the scheduling module RR (Round Robin) |
94,7 → 94,6 |
#include <kernel/config.h> |
#include <sys/types.h> |
#include <kernel/types.h> |
#include <modules/codes.h> |
extern TASK __init__(void *arg); |
110,8 → 109,11 |
/*+ Registration function: |
TIME slice the slice for the Round Robin queue |
int createmain 1 if the level creates the main task 0 otherwise |
struct multiboot_info *mb used if createmain specified +*/ |
void RR2_register_level(TIME slice, |
struct multiboot_info *b used if createmain specified |
returns the level number at which the module has been registered. |
+*/ |
LEVEL RR2_register_level(TIME slice, |
int createmain, |
struct multiboot_info *mb); |
/shark/tags/rel_0_3/include/modules/ds.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: ds.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: ds.h,v 1.2 2003-01-07 17:12:19 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:19 $ |
------------ |
This file contains the aperiodic server DS (Polling Server) |
92,7 → 92,6 |
#include <kernel/config.h> |
#include <sys/types.h> |
#include <kernel/types.h> |
#include <modules/codes.h> |
/*+ 1 - ln(2) +*/ |
#ifndef RM_MINFREEBANDWIDTH |
119,9 → 118,12 |
int flags Options to be used in this level instance... |
LEVEL master the level that must be used as master level for the |
TBS tasks |
int num,den used to compute the TBS bandwidth +*/ |
void DS_register_level(int flags, LEVEL master, int Cs, int per); |
int num,den used to compute the TBS bandwidth |
returns the level number at which the module has been registered. |
+*/ |
LEVEL DS_register_level(int flags, LEVEL master, int Cs, int per); |
/*+ Returns the used bandwidth of a level +*/ |
bandwidth_t DS_usedbandwidth(LEVEL l); |
/shark/tags/rel_0_3/include/modules/cbs.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: cbs.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: cbs.h,v 1.2 2003-01-07 17:12:19 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:19 $ |
------------ |
This file contains the aperiodic server CBS (Total Bandwidth Server) |
103,7 → 103,6 |
#include <kernel/config.h> |
#include <sys/types.h> |
#include <kernel/types.h> |
#include <modules/codes.h> |
/*+ flags... +*/ |
#define CBS_DISABLE_ALL 0 /*+ Task Guarantee enabled +*/ |
117,8 → 116,10 |
int flags Options to be used in this level instance... |
LEVEL master the level that must be used as master level for the |
CBS tasks |
returns the level number at which the module has been registered. |
+*/ |
void CBS_register_level(int flags, LEVEL master); |
LEVEL CBS_register_level(int flags, LEVEL master); |
/*+ Returns the used bandwidth of a level +*/ |
bandwidth_t CBS_usedbandwidth(LEVEL l); |
/shark/tags/rel_0_3/include/modules/pi.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: pi.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: pi.h,v 1.2 2003-01-07 17:12:20 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:20 $ |
------------ |
This file contains the Priority Inheritance (PI) Protocol |
89,6 → 89,8 |
#ifndef __PI_H__ |
#define __PI_H__ |
void PI_register_module(void); |
#include <kernel/types.h> |
RLEVEL PI_register_module(void); |
#endif |
/shark/tags/rel_0_3/include/modules/bd_pscan.h |
---|
39,11 → 39,11 |
*/ |
/* |
* CVS : $Id: bd_pscan.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
* CVS : $Id: bd_pscan.h,v 1.2 2003-01-07 17:12:19 pj Exp $ |
* |
* File: $File$ |
* Revision: $Revision: 1.1.1.1 $ |
* Last update: $Date: 2002-03-29 14:12:51 $ |
* Revision: $Revision: 1.2 $ |
* Last update: $Date: 2003-01-07 17:12:19 $ |
*/ |
#ifndef __BD_PSCAN_H__ |
52,7 → 52,7 |
#include <kernel/types.h> |
#include <kernel/descr.h> |
void BD_PSCAN_register_module(void); |
RLEVEL BD_PSCAN_register_module(void); |
int bd_pscan_getpriority(void); |
/shark/tags/rel_0_3/include/modules/rm.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: rm.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: rm.h,v 1.2 2003-01-07 17:12:20 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:20 $ |
------------ |
This file contains the scheduling module RM (Rate Monotonic) |
122,7 → 122,6 |
#include <kernel/config.h> |
#include <sys/types.h> |
#include <kernel/types.h> |
#include <modules/codes.h> |
/*+ 1 - ln(2) +*/ |
143,9 → 142,12 |
/*+ Registration function: |
int flag Options to be used in this level instance... +*/ |
void RM_register_level(int flag); |
int flag Options to be used in this level instance... |
returns the level number at which the module has been registered. |
+*/ |
LEVEL RM_register_level(int flag); |
/*+ Returns the used bandwidth of a level +*/ |
bandwidth_t RM_usedbandwidth(LEVEL l); |
/shark/tags/rel_0_3/include/modules/rrsoft.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: rrsoft.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: rrsoft.h,v 1.2 2003-01-07 17:12:20 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:20 $ |
------------ |
This file contains the scheduling module RRSOFT (Round Robin for |
103,7 → 103,6 |
#include <kernel/config.h> |
#include <sys/types.h> |
#include <kernel/types.h> |
#include <modules/codes.h> |
extern TASK __init__(void *arg); |
124,8 → 123,12 |
/*+ Registration function: |
TIME slice the slice for the Round Robin queue |
int createmain 1 if the level creates the main task 0 otherwise |
struct multiboot_info *mb used if createmain specified +*/ |
void RRSOFT_register_level(TIME slice, |
struct multiboot_info *mb used if createmain specified |
returns the level number at which the module has been registered. |
+*/ |
LEVEL RRSOFT_register_level(TIME slice, |
int createmain, |
struct multiboot_info *mb, |
BYTE models); |
/shark/tags/rel_0_3/include/modules/ps.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: ps.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: ps.h,v 1.2 2003-01-07 17:12:20 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:20 $ |
------------ |
This file contains the aperiodic server PS (Polling Server) |
93,7 → 93,6 |
#include <kernel/config.h> |
#include <sys/types.h> |
#include <kernel/types.h> |
#include <modules/codes.h> |
/*+ 1 - ln(2) +*/ |
#ifndef RM_MINFREEBANDWIDTH |
120,9 → 119,12 |
int flags Options to be used in this level instance... |
LEVEL master the level that must be used as master level for the |
TBS tasks |
int num,den used to compute the TBS bandwidth +*/ |
void PS_register_level(int flags, LEVEL master, int Cs, int per); |
int num,den used to compute the TBS bandwidth |
returns the level number at which the module has been registered. |
+*/ |
LEVEL PS_register_level(int flags, LEVEL master, int Cs, int per); |
/*+ Returns the used bandwidth of a level +*/ |
bandwidth_t PS_usedbandwidth(LEVEL l); |
/shark/tags/rel_0_3/include/modules/rr.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: rr.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: rr.h,v 1.2 2003-01-07 17:12:20 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:20 $ |
------------ |
This file contains the scheduling module RR (Round Robin) |
95,7 → 95,6 |
#include <kernel/config.h> |
#include <sys/types.h> |
#include <kernel/types.h> |
#include <modules/codes.h> |
extern TASK __init__(void *arg); |
111,8 → 110,11 |
/*+ Registration function: |
TIME slice the slice for the Round Robin queue |
int createmain 1 if the level creates the main task 0 otherwise |
struct multiboot_info *mb used if createmain specified +*/ |
void RR_register_level(TIME slice, |
struct multiboot_info *mb used if createmain specified |
returns the level number at which the module has been registered. |
+*/ |
LEVEL RR_register_level(TIME slice, |
int createmain, |
struct multiboot_info *mb); |
/shark/tags/rel_0_3/include/modules/ss.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: ss.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: ss.h,v 1.2 2003-01-07 17:12:20 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:20 $ |
------------ |
This file contains the aperiodic server SS (Sporadic Server) |
103,7 → 103,6 |
#include <kernel/config.h> |
#include <kernel/types.h> |
#include <sys/types.h> |
#include <modules/codes.h> |
/*+ 1 - ln(2) +*/ |
#ifndef RM_MINFREEBANDWIDTH |
151,9 → 150,12 |
int flags Options to be used in this level instance... |
LEVEL master The level that must be used as master level |
int Cs Server capacity |
int per Server period +*/ |
void SS_register_level(int flags, LEVEL master, int Cs, int per); |
int per Server period |
returns the level number at which the module has been registered. |
+*/ |
LEVEL SS_register_level(int flags, LEVEL master, int Cs, int per); |
/*+ Returns the used bandwidth of a level +*/ |
bandwidth_t SS_usedbandwidth(LEVEL l); |
/shark/tags/rel_0_3/include/modules/tbs.h |
---|
21,11 → 21,11 |
/** |
------------ |
CVS : $Id: tbs.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: tbs.h,v 1.2 2003-01-07 17:12:20 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:12:20 $ |
------------ |
This file contains the aperiodic server TBS (Total Bandwidth Server) |
126,7 → 126,6 |
#include <kernel/config.h> |
#include <sys/types.h> |
#include <kernel/types.h> |
#include <modules/codes.h> |
/*+ flags... +*/ |
#define TBS_DISABLE_ALL 0 |
/shark/tags/rel_0_3/include/trace/qudp.h |
---|
21,16 → 21,26 |
#ifndef __TRACE_QUDP_H |
#define __TRACE_QUDP_H |
#include <kernel/model.h> |
#include <drivers/udpip.h> |
typedef struct TAGtrc_udp_queue_args_t { |
UDP_ADDR addr; |
int size; |
UDP_ADDR local,remote; |
TASK_MODEL *model; |
} TRC_UDP_PARMS; |
#define trc_udp_default_parms(m) |
#define trc_udp_default_parms(m,l,r) (m).size=8192, \ |
(m).model = NULL, \ |
(m).local = (l), \ |
(m).remote = (r); |
#define trc_udp_def_addr(m,addr) |
#define trc_udp_def_size(m,s) (m).size=(s) |
#define trc_udp_def_local(m,l) (m).local=(l) |
#define trc_udp_def_remote(m,r) (m).remote=(r) |
#define trc_udp_def_model(m,mod) (m).model=(mod) |
int trc_register_udp_queue(void); |
#endif |
/shark/tags/rel_0_3/include/trace/queues.h |
---|
38,11 → 38,11 |
*/ |
/* |
* CVS : $Id: queues.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
* CVS : $Id: queues.h,v 1.3 2002-10-28 07:53:40 pj Exp $ |
* |
* File: $File$ |
* Revision: $Revision: 1.1.1.1 $ |
* Last update: $Date: 2002-03-29 14:12:51 $ |
* Revision: $Revision: 1.3 $ |
* Last update: $Date: 2002-10-28 07:53:40 $ |
*/ |
#ifndef __TRACE_QUEUES_H |
58,19 → 58,20 |
void *data; |
} trc_queue_t; |
#define TRC_DUMMY_QUEUE 0 |
#define TRC_DUMMY_QUEUE 0 |
#include <trace/qdummy.h> |
#define TRC_FIXED_QUEUE 1 |
#define TRC_FIXED_QUEUE 1 |
#define TRC_DOSFS_FIXED_QUEUE 2 |
#include <trace/qfixed.h> |
#define TRC_CIRCULAR_QUEUE 2 |
#define TRC_CIRCULAR_QUEUE 3 |
#include <trace/qcirc.h> |
#define TRC_UDP_QUEUE 3 |
#define TRC_UDP_QUEUE 4 |
#include <trace/qudp.h> |
#define TRC_QUEUETYPESNUMBER 4 |
#define TRC_QUEUETYPESNUMBER 5 |
/* uniq!=0 append a this unique number to name */ |
void trc_create_name(char *basename, int uniq, char *pathname); |
/shark/tags/rel_0_3/include/trace/trace.h |
---|
38,11 → 38,11 |
*/ |
/* |
* CVS : $Id: trace.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
* CVS : $Id: trace.h,v 1.2 2002-10-21 10:17:26 pj Exp $ |
* |
* File: $File$ |
* Revision: $Revision: 1.1.1.1 $ |
* Last update: $Date: 2002-03-29 14:12:51 $ |
* Revision: $Revision: 1.2 $ |
* Last update: $Date: 2002-10-21 10:17:26 $ |
*/ |
#ifndef __TRACE_TRACE_H |
68,6 → 68,8 |
/* -- */ |
/* Register a "standard" tracer configuration; requires FAT16 filesystem |
(see documentation) */ |
int TRC_init_phase1_standard(void); |
int TRC_init_phase2_standard(void); |
/shark/tags/rel_0_3/include/trace/qfixed.h |
---|
31,7 → 31,12 |
#define trc_fixed_def_filename(m,s) (m).filename=(s) |
#define trc_fixed_def_size(m,s) (m).size=(s) |
/* Fixed queue, FAT16 filesystem (see kernel/modules/trcfixed.c) */ |
int trc_register_fixed_queue(void); |
/* Fixed queue, DOSFS filesystem (see kernel/modules/trcdfix.c) |
Note: You MUST specify a valid filename... */ |
int trc_register_dosfs_fixed_queue(void); |
#endif |
/shark/tags/rel_0_3/include/bits/errno.h |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: errno.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $ |
CVS : $Id: errno.h,v 1.3 2003-01-07 17:12:19 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:51 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2003-01-07 17:12:19 $ |
------------ |
- error codes used as values for errno |
104,11 → 104,11 |
#define LAST_EXC_NUMBER 19 |
#define XDOUBLE_EXCEPTION 1 /* in act_exc */ |
#define XUNVALID_KILL_SHADOW 2 /* task_makefree */ |
// NOW UNUSED: XDOUBLE_EXCEPTION 1 |
#define XINVALID_KILL_SHADOW 2 /* task_makefree */ |
#define XNOMORE_CLEANUPS 3 /* task_cleanup_push */ |
#define XUNVALID_TASK 4 /* invalid operation for a task */ |
#define XUNVALID_GUEST 5 /* invalid operation for a guest task */ |
#define XINVALID_TASK 4 /* invalid operation for a task */ |
// NOW UNUSED: XINVALID_GUEST 5 /* invalid operation for a guest task */ |
#define XNOMORE_EVENTS 6 /* too many events posted... */ |
#define XDEADLINE_MISS 7 /* PERIODIC_PCLASS, SPORADIC_PCLASS */ |
116,10 → 116,10 |
#define XACTIVATION 9 /* PERIODIC_PCLASS, SPORADIC_PCLASS */ |
#define XMUTEX_OWNER_KILLED 10 /* Mutex */ |
#define XSRP_UNVALID_LOCK 11 /* SRP */ |
#define XSRP_INVALID_LOCK 11 /* SRP */ |
#define XUNVALID_DUMMY_OP 12 /* dummy.h hope it will never called... */ |
#define XUNVALID_SS_REPLENISH 13 /* kernel/mosules/ss.c */ |
#define XINVALID_DUMMY_OP 12 /* dummy.h hope it will never called... */ |
#define XINVALID_SS_REPLENISH 13 /* kernel/mosules/ss.c */ |
#define XARP_TABLE_FULL 14 /* drivers/net/arp.c */ |
149,14 → 149,14 |
#define ETOOMUCH_EXITFUNC (5 + LAST_STDERRNO) /* sys_atexit */ |
#define ENO_AVAIL_TASK (6 + LAST_STDERRNO) /* task_create */ |
#define ENO_AVAIL_SCHEDLEVEL (7 + LAST_STDERRNO) /* task_create */ |
#define ETASK_CREATE (8 + LAST_STDERRNO) /* task_create */ |
/* NOW UNUSED: ETASK_CREATE (8 + LAST_STDERRNO) task_create */ |
#define ENO_AVAIL_RESLEVEL (9 + LAST_STDERRNO) /* task_create */ |
#define ENO_GUARANTEE (10 + LAST_STDERRNO) /* task_create */ |
#define ENO_AVAIL_STACK_MEM (11 + LAST_STDERRNO) /* task_create */ |
#define ENO_AVAIL_TSS (12 + LAST_STDERRNO) /* task_create */ |
#define EUNVALID_KILL (13 + LAST_STDERRNO) /* task_kill */ |
#define EUNVALID_TASK_ID (14 + LAST_STDERRNO) /* task_activate */ |
#define EUNVALID_GROUP (15 + LAST_STDERRNO) /* group_activate e group_kill */ |
#define EINVALID_KILL (13 + LAST_STDERRNO) /* task_kill */ |
#define EINVALID_TASK_ID (14 + LAST_STDERRNO) /* task_activate */ |
#define EINVALID_GROUP (15 + LAST_STDERRNO) /* group_activate e group_kill */ |
#define EPORT_NO_MORE_DESCR (16 + LAST_STDERRNO) |
#define EPORT_NO_MORE_INTERF (17 + LAST_STDERRNO) |
167,11 → 167,11 |
#define EPORT_UNSUPPORTED_ACC (22 + LAST_STDERRNO) |
#define EPORT_WRONG_OP (23 + LAST_STDERRNO) |
#define EPORT_WRONG_TYPE (24 + LAST_STDERRNO) |
#define EPORT_UNVALID_DESCR (25 + LAST_STDERRNO) |
#define EPORT_INVALID_DESCR (25 + LAST_STDERRNO) |
#define ECAB_UNVALID_ID (26 + LAST_STDERRNO) |
#define ECAB_INVALID_ID (26 + LAST_STDERRNO) |
#define ECAB_CLOSED (27 + LAST_STDERRNO) |
#define ECAB_UNVALID_MSG_NUM (28 + LAST_STDERRNO) |
#define ECAB_INVALID_MSG_NUM (28 + LAST_STDERRNO) |
#define ECAB_NO_MORE_ENTRY (29 + LAST_STDERRNO) |
#define ECAB_TOO_MUCH_MSG (30 + LAST_STDERRNO) |
/shark/tags/rel_0_3/include/drivers/parport.h |
---|
0,0 → 1,241 |
/* |
* Project: |
* Parallel Port S.Ha.R.K. Project |
* |
* Module: |
* ppDrv.h |
* |
* Description: |
* file contents description |
* |
* Coordinators: |
* Giorgio Buttazzo <giorgio@sssup.it> |
* Paolo Gai <pj@gandalf.sssup.it> |
* |
* Authors: |
* Andrea Battistotti <btandrea@libero.it> |
* Armando Leggio <a_leggio@hotmail.com> |
* |
* |
* http://www.sssup.it |
* http://retis.sssup.it |
* http://shark.sssup.it |
* |
*/ |
/* PPDrv.h |
header file for par port communication... |
*/ |
/* |
* Copyright (C) 2002 Andrea Battistotti , Armando Leggio |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
* |
* CVS : $Id: parport.h,v 1.1 2002-10-28 07:52:11 pj Exp $ |
*/ |
#include <kernel/kern.h> |
#include <drivers/glib.h> |
#include <drivers/keyb.h> |
#include <time.h> |
#include <stdio.h> |
#include <stdlib.h> |
#include <kernel/func.h> |
#include <string.h> |
#include <ll/ll.h> |
#include <kernel/types.h> |
#include <kernel/descr.h> |
#include <math.h> |
/* general defs... */ |
#define PP_DEBUG 1 /* 1/0 Activate/Disactive internal debugs... */ |
#define PP_STATS 1 /* 1/0 Activate/Disactive internal statistics... */ |
/* return value... */ |
#define FALSE 0 |
#define TRUE 1 |
#define TIMEOUT 2 |
#define PP_BASE_ADR 0x0378 /* std addr for LPT1 */ |
#define BYTE unsigned char |
#define BOOL unsigned char |
#define BUF_IDX unsigned int |
#define BUF_PNTR unsigned int |
#define PIN_MASK unsigned int |
#define clock() sys_gettime(NULL) |
enum PIN_STATUS { PIN_OFF , PIN_ON } ; /* positive logic: off==0, on==1 */ |
/*********************************************************************************/ |
/* PART 1 : LOW LIVEL FUNC */ |
/* defs used in ppPinDrv....*/ |
/* for std & pin use of pp... */ |
#define PP_DATA_REG (PP_BASE_ADR+0) // address of data register |
#define PP_STATUS_REG (PP_BASE_ADR+1) // address of status register |
#define PP_CONTR_REG (PP_BASE_ADR+2) // address of control regist |
/* out data pins... */ |
#define PP_PIN_D0 0x01 /* pin 2 */ |
#define PP_PIN_D1 0x02 /* pin 3 */ |
#define PP_PIN_D2 0x04 /* pin 4 */ |
#define PP_PIN_D3 0x08 /* pin 5 */ |
#define PP_PIN_D4 0x10 /* pin 6 */ |
#define PP_PIN_D5 0x20 /* pin 7 */ |
#define PP_PIN_D6 0x40 /* pin 8 */ |
#define PP_PIN_D7 0x80 /* pin 9 */ |
/* status pins... */ |
#define PP_PIN_ERROR 0x08 /* pin 15 */ |
#define PP_PIN_SELECTED 0x10 /* pin 13 */ |
#define PP_PIN_PAPEROUT 0x20 /* pin 12 */ |
#define PP_PIN_ACK 0x40 /* pin 10 */ |
#define PP_PIN_BUSY 0x80 /* pin 11 */ |
/* control pins... */ |
#define PP_PIN_DATASTROBE 0x01 /* pin 1 */ |
#define PP_PIN_AUTOFEED 0x02 /* pin 14 */ |
#define PP_PIN_INITOUT 0x04 /* pin 16 */ |
#define PP_PIN_SELECT 0x08 /* pin 17 */ |
/* Data i/o */ |
#define ppSetDataByte(a) outp(PP_DATA_REG,a) |
#define ppReadDataByte() inp(PP_DATA_REG) |
/* this is NOT bidirectional actually: simply read value that I myself wrote on parport early...*/ |
/* in std lpt you cannot lay down electric pin D0,D1,..D7 from extern and read value in pc...*/ |
/* this (in std lpt) will broke down lpt port!... */ |
/* there are obviously also bi-dir port on 8 bit (ECC & ECP) but ctrl & status pins have */ |
/* different meaning so we don't manage them... See docs. */ |
/* Data pins */ |
void ppSetDataPin(int state, PIN_MASK pin); /* in ppPinDrv.c */ |
#define ppSetPin_D0(a) ppSetDataPin(a,PP_PIN_D0) /* On ==1 , Off == 0 */ |
#define ppSetPin_D1(a) ppSetDataPin(a,PP_PIN_D1) |
#define ppSetPin_D2(a) ppSetDataPin(a,PP_PIN_D2) |
#define ppSetPin_D3(a) ppSetDataPin(a,PP_PIN_D3) |
#define ppSetPin_D4(a) ppSetDataPin(a,PP_PIN_D4) |
#define ppSetPin_D5(a) ppSetDataPin(a,PP_PIN_D5) |
#define ppSetPin_D6(a) ppSetDataPin(a,PP_PIN_D6) |
#define ppSetPin_D7(a) ppSetDataPin(a,PP_PIN_D7) |
/* Status pins */ |
#define ppCheckPin_Error() (inp(PP_STATUS_REG & PP_PIN_ERROR)!=0?1:0) |
#define ppCheckPin_Selected() (inp(PP_STATUS_REG & PP_PIN_SELECTED)!=0?1:0) |
#define ppCheckPin_PaperOut() (inp(PP_STATUS_REG & PP_PIN_PAPEROUT)!=0?1:0) |
#define ppCheckPin_Acknowledge() (inp(PP_STATUS_REG & PP_PIN_ACK)!=0?1:0) |
#define ppCheckPin_Busy() (inp(PP_STATUS_REG & PP_PIN_BUSY)!=0?0:1) /* act low...*/ |
/* Control pins */ |
/* Control i/o */ |
#define ppSetCtrlByte(a) outp(PP_CONTR_REG,a) |
#define ppReadCtrlByte() inp(PP_CONTR_REG) |
/* idem...*/ |
void ppSetCtrlPin(int state, PIN_MASK pin); /* in ppPinDrv.c */ |
#define ppSetPin_DataStrobe(a) ppSetCtrlPin(!a,PP_PIN_DATASTROBE) /* low active...*/ |
#define ppSetPin_Autofeed(a) ppSetCtrlPin(!a,PP_PIN_AUTOFEED) /* low active...*/ |
#define ppSetPin_InitOut(a) ppSetCtrlPin(a,PP_PIN_INITOUT) |
#define ppSetPin_Select(a) ppSetCtrlPin(!a,PP_PIN_SELECT) /* low active...*/ |
/*********************************************************************************/ |
/* PART 2 : DATA TRANSFER BETWEEN PC */ |
/* defs used in ppDrv & ppNRTDrv... */ |
#define PPDRV_PERIOD 1000 /* 300000 ok for debug...*/ |
#define PPDRV_WCET 150 /* lower bound: 120; more if debug & stats are on...*/ |
#define PP_BUF_LEN 1024 /* between 2^2 and 2^16 (64k) */ |
#define CLK_TIMEOUT 55000 /* timeout for sync pc-pc...*/ |
/* for laplink use of std pp... */ |
#define TX_PORT PP_BASE_ADR /* transmit port */ |
#define RX_PORT TX_PORT+1 /* receive port */ |
/* laplink bit mask */ |
#define TX_DATA 0x0F /* 0000 1111 pin 2,3,4,5 */ |
#define TX_CTR 0x10 /* 0001 0000 bit 4 port TX pin 6*/ |
#define RX_DATA 0x78 /* 0111 1000 pin 15,13,12,10 */ |
#define RX_CTR 0x80 /* 1000 0000 bit 7 port RX pin 11*/ |
#define LSN 0x0F /* 0000 1111 low significative nibble */ |
#define MSN 0xF0 /* 1111 0000 most significative nibble */ |
#define BYTE_CTR 0xAF /* 1010 1111 control char */ |
/* comm protocol */ |
#define ppSendRTS() ppSetOnPinTX_CTR() |
#define ppIsRTS() ppReadIfPinRX_CTRIsOn() |
#define ppSendOTS() ppSetOnPinTX_CTR() |
#define ppIsOTS() ppReadIfPinRX_CTRIsOn() |
#define ppSendDR() ppSetOffPinTX_CTR() |
#define ppIsDR() ppReadIfPinRX_CTRIsOff() |
#define ppSendER() ppSetOffPinTX_CTR() |
#define ppIsER() ppReadIfPinRX_CTRIsOff() |
#define ppSetOnPinTX_CTR() outp(TX_PORT,(inp(TX_PORT)|TX_CTR)) /* used by: ppSendRTS ppSendOTS */ |
#define ppSetOffPinTX_CTR() outp(TX_PORT,(inp(TX_PORT)&(~TX_CTR))) /* used by: ppSendDR ppSendER */ |
#define ppReadIfPinRX_CTRIsOn() ((((~inp(RX_PORT))&RX_CTR)==0)?FALSE:TRUE) |
#define ppReadIfPinRX_CTRIsOff() (((BYTE)((~RX_CTR)|(~inp(RX_PORT)))==0x7F)?TRUE:FALSE) |
/* Funct Return Code */ |
enum PP_COMM_RTR_CODE { |
PP_COMM_OK, |
PP_COMM_NOREADYBYTES_EXC, |
PP_COMM_NOFREEBYTES_EXC |
}; |
/* Funct Return Code */ |
enum PP_SYSMSG_RTR_CODE { |
PP_SYSMSG_OK, |
PP_NOSYSMSG_EXC, |
PP_NOFREEMSG_EXC |
}; |
/* NON REAL TIME (== BLOCK) functions...*/ |
/* from ppNRTDrv.c...*/ |
BOOL ppNRTOpenComm(void); |
BOOL ppNRTWaitRTS(void); |
BOOL ppNRTWaitDR(void); |
BOOL ppNRTWaitOTS(void); |
BOOL ppNRTWaitER(void); |
BOOL ppNRTTxOneByte(BYTE c); |
BOOL ppNRTRxOneByte(BYTE *c); |
/* REAL TIME (== NON BLOCK) POLLING SERVER */ |
/* from ppDrv.c... */ |
void ppInitDrv(void (*pf)(char *)); /* NRT: to be called before start ppPollingSrv...*/ |
TASK ppPollingSvr(void *arg); /* periodic task to be started before any call to Rx/Tx...*/ |
/* input output function */ |
int ppRxOneByte(BYTE *c); /* retrive 1 byte */ |
int ppTxOneByte(BYTE c); /* send 1 byte */ |
int ppRxBytes(BYTE *c, unsigned int nbyte); /* retrive n byte... */ |
int ppTxBytes(BYTE *c, unsigned int nbyte); /* send n byte... */ |
/* System msg */ |
#define SYS_MSG_COLS 33 |
#define SYS_MSG_LINS 15 |
int ppReadSysMsg(char * buf); |
int ppWriteSysMsg(char * buf, ...); |
/shark/tags/rel_0_3/config/libdep.mk |
---|
151,6 → 151,11 |
LIB_DEP += $(LIB_PATH)/lib6025e.a |
endif |
# Parport |
ifeq ($(LIB_PATH)/libpport.a,$(wildcard $(LIB_PATH)/libpport.a)) |
LINK_LIB += -lpport |
LIB_DEP += $(LIB_PATH)/libpport.a |
endif |
/shark/tags/rel_0_3/config/hconf/hconf |
---|
File deleted |
\ No newline at end of file |
/shark/tags/rel_0_3/config/hconf/old/shower.c |
---|
File deleted |
/shark/tags/rel_0_3/config/hconf/old/ppp |
---|
File deleted |
/shark/tags/rel_0_3/lib/readme |
---|
0,0 → 1,0 |
This force directory existence |
/shark/tags/rel_0_3/ports/fftw/makefile |
---|
1,12 → 1,6 |
.PHONY: all install clean cleanall depend |
all install:: |
@echo |
@echo These files have been ported to Hartik |
@echo You have to expect some warnings!!! |
@echo |
all install clean cleanall depend:: |
make -C rfftw $@ |
make -C fftw $@ |
/shark/tags/rel_0_3/ports/mpg123/mpg123.h |
---|
1,4 → 1,3 |
#define exit l1_exit |
/* |
* mpg123 defines |
* used source: musicout.h from mpegaudio package |
5,6 → 4,7 |
*/ |
#include <stdio.h> |
#include <stdlib.h> |
#include <string.h> |
#include <signal.h> |
#include <math.h> |
/shark/tags/rel_0_3/ports/makefile |
---|
1,5 → 1,5 |
dirs := $(filter-out CVS makefile, $(wildcard *)) |
dirs := $(filter-out CVS cvs makefile, $(wildcard *)) |
p_all := $(addprefix prefixall_, $(dirs)) |
p_install := $(addprefix prefixinstall_, $(dirs)) |
p_clean := $(addprefix prefixclean_, $(dirs)) |
/shark/tags/rel_0_3/fs/msdos/msdos_i.c |
---|
34,11 → 34,11 |
*/ |
/* |
* CVS : $Id: msdos_i.c,v 1.1.1.1 2002-03-29 14:12:50 pj Exp $ |
* CVS : $Id: msdos_i.c,v 1.2 2002-10-28 08:24:43 pj Exp $ |
* |
* File: $File$ |
* Revision: $Revision: 1.1.1.1 $ |
* Last update: $Date: 2002-03-29 14:12:50 $ |
* Revision: $Revision: 1.2 $ |
* Last update: $Date: 2002-10-28 08:24:43 $ |
*/ |
#include <fs/types.h> |
58,7 → 58,7 |
*/ |
#define DEBUG_ADDCLUSTER KERN_DEBUG |
#undef DEBUG_ADDCLUSTER KERN_DEBUG |
#undef DEBUG_ADDCLUSTER |
#define DEBUG_ADDCLUSTER_EXTRA KERN_DEBUG |
#undef DEBUG_ADDCLUSTER_EXTRA |
/shark/tags/rel_0_3/fs/fs.c |
---|
34,11 → 34,11 |
*/ |
/* |
* CVS : $Id: fs.c,v 1.1.1.1 2002-03-29 14:12:50 pj Exp $ |
* CVS : $Id: fs.c,v 1.2 2003-01-07 17:14:05 pj Exp $ |
* |
* File: $File$ |
* Revision: $Revision: 1.1.1.1 $ |
* Last update: $Date: 2002-03-29 14:12:50 $ |
* Revision: $Revision: 1.2 $ |
* Last update: $Date: 2003-01-07 17:14:05 $ |
*/ |
#include "dentry.h" |
761,8 → 761,6 |
return (void*)-1; |
} |
//sys_status(SCHED_STATUS); |
printkc("fs_shut: END"); |
return (void*)0; |
/shark/tags/rel_0_3/fs/fs.h |
---|
34,11 → 34,11 |
*/ |
/* |
* CVS : $Id: fs.h,v 1.1.1.1 2002-03-29 14:12:50 pj Exp $ |
* CVS : $Id: fs.h,v 1.2 2002-11-11 08:40:44 pj Exp $ |
* |
* File: $File$ |
* Revision: $Revision: 1.1.1.1 $ |
* Last update: $Date: 2002-03-29 14:12:50 $ |
* Revision: $Revision: 1.2 $ |
* Last update: $Date: 2002-11-11 08:40:44 $ |
*/ |
/*** |
51,6 → 51,7 |
#include <fs/types.h> |
#include <fs/mount.h> |
#include <time.h> |
#include "mutex.h" |
#include "semaph.h" |
222,10 → 223,13 |
#ifdef SHUTDOWNTIMEOUT |
{ |
int counter; |
struct timespec delay; |
delay.tv_sec=SHUTDOWNSLICE/1000000; |
delay.tv_nsec=(SHUTDOWNSLICE%1000000)*1000; |
counter=0; |
while (counter<SHUTDOWNCOUNTER&&__fs_sem_trywait(&fssyssync)) { |
counter++; |
task_delay(SHUTDOWNSLICE); |
nanosleep(&delay, NULL); |
} |
if (counter>=SHUTDOWNCOUNTER) { |
printk(KERN_NOTICE "filesystem shutdown timeout... aborting!"); |
/shark/tags/rel_0_3/README.TXT |
---|
0,0 → 1,14 |
TO COMPILE THIS DISTRIBUTION: |
You need to copy a suitable config.mk from config/mk/ to the config/ |
directory. |
For example, a Linux user should type |
cp config/mk/linux.mk config/config.mk |
from the S.Ha.R.K. root directory. |
Then, just type make, and the kernel will be compiled. |
Enjoy! |
/shark/tags/rel_0_3/makefile |
---|
1,15 → 1,16 |
# |
# Main HARTIK makefile |
# Main S.Ha.R.K. makefile |
# |
ifndef BASE |
BASE=. |
endif |
include $(BASE)/config/config.mk |
# |
.PHONY: install all clean cleanall depend test |
.PHONY: install all clean cleanall depend |
install all clean cleanall depend: |
make -C oslib $@ |
18,21 → 19,3 |
make -C fs $@ |
make -C libc $@ |
make -C ports $@ |
test: |
make -C examples $@ |
# |
# some usefull hidden target (made by Paolo) |
# |
ifeq ($(SYSNAME),MS-DOS) |
.PHONY: e |
e: |
dir /s *.err >errlist |
list errlist |
endif |
/shark/tags/rel_0_3/drivers/net/ne.c |
---|
227,12 → 227,12 |
} |
if (!pdev) |
continue; |
printk(KERN_INFO "ne.c: PCI BIOS reports %s at i/o %#x, irq %d.\n", |
printk(KERN_INFO "ne.c: PCI BIOS reports %s at i/o %x, irq %d.\n", |
pci_clone_list[i].name, |
pci_ioaddr, pci_irq_line); |
printk("*\n* Use of the PCI-NE2000 driver with this card is recommended!\n*\n"); |
if (ne_probe1(dev, pci_ioaddr) != 0) { /* Shouldn't happen. */ |
printk(KERN_ERR "ne.c: Probe of PCI card at %#x failed.\n", pci_ioaddr); |
printk(KERN_ERR "ne.c: Probe of PCI card at %x failed.\n", pci_ioaddr); |
pci_irq_line = 0; |
return -ENXIO; |
} |
285,7 → 285,7 |
if (ei_debug && version_printed++ == 0) |
printk(version); |
printk(KERN_INFO "NE*000 ethercard probe at %#3x:", ioaddr); |
printk(KERN_INFO "NE*000 ethercard probe at %3x:", ioaddr); |
/* A user with a poor card that fails to ack the reset, or that |
does not have a valid 0x57,0x57 signature can still use this |
473,7 → 473,7 |
dev->dev_addr[i] = SA_prom[i]; |
} |
printk("\n%s: %s found at %#x, using IRQ %d.\n", |
printk("\n%s: %s found at %x, using IRQ %d.\n", |
dev->name, name, ioaddr, dev->irq); |
ei_status.name = name; |
638,7 → 638,7 |
} while (--tries > 0); |
if (tries <= 0) |
printk(KERN_WARNING "%s: RX transfer address mismatch," |
"%#4.4x (expected) vs. %#4.4x (actual).\n", |
"%4.4x (expected) vs. %4.4x (actual).\n", |
dev->name, ring_offset + xfer_count, addr); |
} |
#endif |
/shark/tags/rel_0_3/drivers/net/eth.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: eth.c,v 1.1.1.1 2002-03-29 14:12:50 pj Exp $ |
CVS : $Id: eth.c,v 1.3 2002-11-11 08:41:31 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:50 $ |
Revision: $Revision: 1.3 $ |
Last update: $Date: 2002-11-11 08:41:31 $ |
------------ |
**/ |
74,6 → 74,8 |
/*#include "lowlev.h" |
//#include "3com.h" */ |
//#define DEBUG_ETH |
#define ETH_PAGE 5 |
struct eth_service{ |
131,7 → 133,7 |
void dev_tint(struct device *dev) |
{ |
cprintf("Warning!!!! dev_tint called!!! (Why???)\n"); |
printk(KERN_WARNING "Warning!!!! dev_tint called!!! (Why?)\n"); |
sys_abort(201); |
} |
145,7 → 147,7 |
{ |
//cprintf("DENTRO netif_rx, skbuf=%p\n",skb->data); |
if (nettask_pid == NIL) { |
cprintf("Net receives packets, but the driver doesn't exist!!!\n"); |
printk(KERN_CRIT "Net receives packets, but the driver doesn't exist!!!\n"); |
sys_abort(300); |
} |
200,7 → 202,7 |
/* formatted print of an ethernet header */ |
void eth_printHeader(struct eth_header *p) |
{ |
cprintf("Dest : %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x \n",p->dest.ad[0], |
cprintf("Dest : %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x \n",p->dest.ad[0], |
p->dest.ad[1], |
p->dest.ad[2], |
p->dest.ad[3], |
338,27 → 340,27 |
{ |
int p; |
if (err != ETH_BUFFERS_FULL) cprintf("Ethernet : "); |
if (err != ETH_BUFFERS_FULL) printk(KERN_ERR "Ethernet : "); |
switch (err) { |
case ETH_DRIVER_NOT_FOUND : |
cprintf("NET PANIC --> Etherlink not found.\n"); |
printk(KERN_ERR "NET PANIC --> Etherlink not found.\n"); |
return 0; |
case ETH_RXERROR : |
cprintf("Receive error (vero dramma!!!).\n"); |
printk(KERN_ERR "Receive error (vero dramma!!!).\n"); |
return 0; |
case ETH_TXERROR : |
cprintf("Transimit error: N. Max Retry.\n"); |
printk(KERN_ERR "Transimit error: N. Max Retry.\n"); |
return 0; |
case ETH_PROTOCOL_ERROR : |
cprintf("Too much protocols.\n"); |
printk(KERN_ERR "Too much protocols.\n"); |
return 0; |
case ETH_BUFFERS_FULL: |
cprintf("Buffers full: frame lost!\n"); |
printk(KERN_ERR "Buffers full: frame lost!\n"); |
return 1; |
case ETH_NULLPROTOCOL_EXC: |
cprintf("Null protocol called!!!\n"); |
printk(KERN_ERR "Null protocol called!!!\n"); |
for (p = 0; p < ETH_MAX_PROTOCOLS; p++) { |
cprintf("%d: %d\n", p, eth_table[p].type); |
printk(KERN_ERR "%d: %d\n", p, eth_table[p].type); |
} |
return 0; |
default : |
377,7 → 379,7 |
int linux_found = 0; |
if (!ethIsInstalled) { |
cprintf(" Hartik Net lib\n\n"); |
printk(KERN_INFO "Hartik/Shark Net lib"); |
/* Scan the devices connected to the PCI bus */ |
cardtype = NONE; |
391,24 → 393,30 |
soft_task_def_aperiodic(m_soft); |
soft_task_def_system(m_soft); |
soft_task_def_nokill(m_soft); |
m = &m_soft; |
m = (TASK_MODEL *)&m_soft; |
} |
nettask_pid = task_create("rxProc", net_extern_driver, m, NULL); |
if (nettask_pid == NIL) { |
cprintf("Can't create extern driver!!!\n"); |
return 0; |
printk(KERN_ERR "Can't create extern driver!!!\n"); |
return 0; |
} |
task_activate(nettask_pid); |
if (pci_init() == 1) { |
linuxpci_init(); |
// pci_show(); |
cprintf("LF %d\n", linux_found); |
#ifdef DEBUG_ETH |
printk(KERN_DEBUG "LF %d\n", linux_found); |
#endif |
linux_found += (rtl8139_probe(&device0) == 0); |
cprintf("LF %d\n", linux_found); |
#ifdef DEBUG_ETH |
printk(KERN_DEBUG "LF %d\n", linux_found); |
#endif |
linux_found += (tc59x_probe(&device0) == 0); |
cprintf("LF %d\n", linux_found); |
#ifdef DEBUG_ETH |
printk(KERN_DEBUG "LF %d\n", linux_found); |
#endif |
#if 0 |
ndev = pci_scan_bus(pci_devs); |
#ifdef __ETH_DBG__ |
434,7 → 442,7 |
} else { |
lowlev_send = vortex_send_mem; |
} |
cprintf("PCI Ethlink card found:\n"); |
printk(KERN_INFO "PCI Ethlink card found:\n"); |
lowlev_info(r); |
cardtype = VORTEX; |
} |
460,10 → 468,14 |
#else |
} |
if (linux_found == 0) { |
linux_found += (el3_probe(&device0) == 0); |
cprintf("LF %d\n", linux_found); |
linux_found += (ne_probe(&device0) == 0); |
cprintf("LF %d\n", linux_found); |
linux_found += (el3_probe(&device0) == 0); |
#ifdef DEBUG_ETH |
printk(KERN_DEBUG "LF %d\n", linux_found); |
#endif |
linux_found += (ne_probe(&device0) == 0); |
#ifdef DEBUG_ETH |
printk(KERN_DEBUG "LF %d\n", linux_found); |
#endif |
} |
/* |
474,9 → 486,9 |
*/ |
if (linux_found) { |
device0.open(&device0); |
cprintf("Net card found!!!\n"); |
printk(KERN_INFO "Net card found!!!\n"); |
} else { |
cprintf("No card found... \n"); |
printk(KERN_INFO "No card found... \n"); |
/* cprintf("No card found... Installing loopback device\n"); |
loopback_init(&device0); |
device0.open(&device0);*/ |
497,7 → 509,7 |
sys_atrunlevel(eth_close,NULL,RUNLEVEL_BEFORE_EXIT); |
} else { |
cprintf("Ethernet already installed!!!\n"); |
printk(KERN_INFO "Ethernet already installed!!!\n"); |
return 0; |
} |
return 1; |
505,10 → 517,12 |
void eth_close(void *a) |
{ |
kern_printf("CLOSE!!!!\n"); |
if (ethIsInstalled == TRUE) { |
device0.stop(&device0); /*This seems to break everithing... |
// lowlev_close(eth_dev.BaseAddress);*/ |
ethIsInstalled = FALSE; |
} |
#ifdef DEBUG_ETH |
printk(KERN_DEBUG "CLOSE!!!!\n"); |
#endif |
if (ethIsInstalled == TRUE) { |
device0.stop(&device0); /*This seems to break everithing... |
// lowlev_close(eth_dev.BaseAddress);*/ |
ethIsInstalled = FALSE; |
} |
} |
/shark/tags/rel_0_3/drivers/net/net.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: net.c,v 1.1.1.1 2002-03-29 14:12:50 pj Exp $ |
CVS : $Id: net.c,v 1.2 2002-10-28 08:01:36 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:50 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2002-10-28 08:01:36 $ |
------------ |
**/ |
63,6 → 63,8 |
#include <drivers/net.h> |
#include "eth_priv.h" |
//#define DEBUG_NET |
/* OKKIO!!!!! net_base must change if you change NET_MAX_PROTOCOLS!!!! */ |
struct net_model net_base = {0, 0, {NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
88,8 → 90,10 |
} |
/* Then, the high level layers */ |
for(i = 0; i < m->numprotocol; i++) { |
cprintf("Protocol %d init \n", i); |
m->protocol[i].initfun(m->protocol[i].initparms); |
#ifdef DEBUG_NET |
printk(KERN_DEBUG "Protocol %d init \n", i); |
#endif |
m->protocol[i].initfun(m->protocol[i].initparms); |
} |
return 1; |
/shark/tags/rel_0_3/drivers/parport/ppnrtdrv.c |
---|
0,0 → 1,295 |
/* |
* |
* Project: |
* Parallel Port S.Ha.R.K. Project |
* |
* Module: |
* ppNRTDrv.c |
* |
* Description: |
* file contents description |
* |
* Coordinators: |
* Giorgio Buttazzo <giorgio@sssup.it> |
* Paolo Gai <pj@gandalf.sssup.it> |
* |
* Authors: |
* Andrea Battistotti <btandrea@libero.it> |
* Armando Leggio <a_leggio@hotmail.com> |
* |
* |
* http://www.sssup.it |
* http://retis.sssup.it |
* http://shark.sssup.it |
* |
*/ |
/************************************************************************* |
* Module : ppNRTDrv.c |
* Author : Andrea Battistotti , Armando Leggio |
* Description: Set On/Off single pin of LPT1... |
* 2002 @ Pavia - GNU Copyrights |
*******************************************************************************************/ |
/* |
* Copyright (C) 2002 Andrea Battistotti , Armando Leggio |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
* |
* CVS : $Id: ppnrtdrv.c,v 1.1 2002-10-28 08:03:54 pj Exp $ |
*/ |
/******************************************************************************************* |
* A standard PC provides for three printer ports, at the following base addresses: |
* |
* LPT1 = 0x0378 or 0x03BC |
* LPT2 = 0x0278 or 0x0378 |
* LPT3 = 0x0278 |
* |
* This module assumes that LPT1 is at 0x0378. |
* |
* The printer port has three 8-bit registers: |
* |
* Data Register (base + 0) ........ outputs |
* |
* 7 6 5 4 3 2 1 0 |
* . . . . . . . * D0 ........... (pin 2), 1=High, 0=Low (true) |
* . . . . . . * . D1 ........... (pin 3), 1=High, 0=Low (true) |
* . . . . . * . . D2 ........... (pin 4), 1=High, 0=Low (true) |
* . . . . * . . . D3 ........... (pin 5), 1=High, 0=Low (true) |
* . . . * . . . . D4 ........... (pin 6), 1=High, 0=Low (true) |
* . . * . . . . . D5 ........... (pin 7), 1=High, 0=Low (true) |
* . * . . . . . . D6 ........... (pin 8), 1=High, 0=Low (true) |
* * . . . . . . . D7 ........... (pin 9), 1=High, 0=Low (true) |
* |
* Status Register (base + 1) ...... inputs |
* |
* 7 6 5 4 3 2 1 0 |
* . . . . . * * * Undefined |
* . . . . * . . . Error ........ (pin 15), high=1, low=0 (true) |
* . . . * . . . . Selected ..... (pin 13), high=1, low=0 (true) |
* . . * . . . . . No paper ..... (pin 12), high=1, low=0 (true) |
* . * . . . . . . Ack .......... (pin 10), high=1, low=0 (true) |
* * . . . . . . . Busy ......... (pin 11), high=0, low=1 (inverted) |
* |
* Control Register (base + 2) ..... outputs |
* |
* 7 6 5 4 3 2 1 0 |
* . . . . . . . * Strobe ....... (pin 1), 1=low, 0=high (inverted) |
* . . . . . . * . Auto Feed .... (pin 14), 1=low, 0=high (inverted) |
* . . . . . * . . Initialize ... (pin 16), 1=high, 0=low (true) |
* . . . . * . . . Select ....... (pin 17), 1=low, 0=high (inverted) |
* * * * * . . . . Unused |
* |
* Pins 18-25 are ground. |
********************************************************************************************/ |
#include <drivers/parport.h> |
/*************************************************************************/ |
BOOL ppNRTWaitRTS(void) |
{ |
BYTE port; |
TIME start = clock(); |
do |
{ |
port = ~inp(RX_PORT); /* port status */ |
port = port & RX_CTR; /* test cntr read bit*/ |
if(clock() > start + CLK_TIMEOUT) return TIMEOUT; |
} while(port == 0); |
return TRUE; |
} |
/*************************************************************************/ |
BOOL ppNRTWaitDR(void) |
{ |
BYTE port; |
TIME start = clock(); /* start */ |
do { |
port = (~inp(RX_PORT)) | (~RX_CTR); /* test cntr read bit*/ |
if(clock() > start + CLK_TIMEOUT) return TIMEOUT; |
} while(port != 0x7F); /* 0111 1111 */ |
return TRUE; |
} |
/*************************************************************************/ |
BOOL ppNRTWaitOTS(void) |
{ |
BYTE port; |
TIME start = clock(); /* start time */ |
do |
{ |
port = ~inp(RX_PORT); /* read port status */ |
port = port & RX_CTR; /* test cntr read bit*/ |
if(clock() > start + CLK_TIMEOUT) return TIMEOUT; |
} while(port == 0); |
return TRUE; |
} |
/*************************************************************************/ |
BOOL ppNRTWaitER(void) |
{ |
BYTE port; |
TIME start = clock(); /* start */ |
do |
{ |
port = ~inp(RX_PORT); /* read port status */ |
port = port | (~RX_CTR); /* test cntr read bit*/ |
if(clock() > start + CLK_TIMEOUT) return TIMEOUT; |
} while(port != 0x7F); /* 0111 1111 */ |
return TRUE; |
} |
/**************************************************************************/ |
BOOL ppNRTTxOneByte(BYTE c) |
{ |
BYTE port; |
/*------------------------- Least Significative Nibble */ |
ppSendRTS(); /* Request To Send */ |
port = inp(TX_PORT); /* read port status */ |
port = port & (~TX_DATA); /* set tx bits == 0 */ |
port = port | (c & LSN); /* set bit 0..3 with LSN */ |
#if PP_DEBUG == 1 |
kern_printf("ppNRTTxOneByte: SendRTS, before WaitOTS\n"); |
#endif |
if(ppNRTWaitOTS()!=TRUE) return TIMEOUT; |
outp(TX_PORT,port); |
ppSendDR(); /* Data Ready */ |
#if PP_DEBUG == 1 |
kern_printf("ppNRTTxOneByte: SendDR, before WaitER\n"); |
#endif |
if(ppNRTWaitER() !=TRUE) return TIMEOUT; |
/*------------------------- More Significative Nibble */ |
ppSendRTS(); /* Request To Send */ |
port = inp(TX_PORT); /* read port status */ |
port = port & (~TX_DATA); /* set off trasmission bits... */ |
port = port | (c >> 4); /* set bit 0..3 with MSN */ |
#if PP_DEBUG == 1 |
kern_printf("ppNRTTxOneByte: SendRTS, before WaitOTS\n"); |
#endif |
if(ppNRTWaitOTS()!=TRUE) return TIMEOUT; |
outp(TX_PORT,port); /* send data nibble... */ |
ppSendDR(); /* Data Ready */ |
#if PP_DEBUG == 1 |
kern_printf("ppNRTTxOneByte: SendDR, before WaitER\n"); |
#endif |
if(ppNRTWaitER() !=TRUE) return TIMEOUT; |
return TRUE; |
} |
/**************************************************************************/ |
BOOL ppNRTRxOneByte(BYTE *c) |
{ |
BYTE port; |
if(ppIsRTS() == FALSE) return FALSE; |
ppSendOTS(); /* Ok To Send */ |
#if PP_DEBUG == 1 |
kern_printf("ppNRTRxOneByte: IsRTS, SendOTS, before WaitDR\n"); |
#endif |
if(ppNRTWaitDR() != TRUE) return TIMEOUT; |
port = inp(RX_PORT); /* read nibble */ |
ppSendER(); /* End Read */ |
*c = (port >> 3); /* read LSN */ |
#if PP_DEBUG == 1 |
kern_printf("ppNRTRxOneByte: SendER, before WaitRTS\n"); |
#endif |
if(ppNRTWaitRTS() != TRUE) return TIMEOUT; |
ppSendOTS(); |
#if PP_DEBUG == 1 |
kern_printf("ppNRTRxOneByte: SendOTS, before WaitDR\n"); |
#endif |
if(ppNRTWaitDR() != TRUE) return TIMEOUT; |
#if PP_DEBUG == 1 |
kern_printf("ppNRTRxOneByte: DR received, send ER\n"); |
#endif |
port = inp(RX_PORT); /* read nibble */ |
ppSendER(); /* End Read */ |
*c = (*c & ~MSN); /* set 0 c MSN nibble... */ |
*c = (*c | ((port >> 3) << 4)); /* read MSN */ |
return TRUE; |
} |
/************************************************************************* |
------------------------------------------------------------------------*/ |
BOOL ppNRTOpenComm(void) |
{ |
BYTE c,port,rx,tx; |
TIME start = clock(); |
outp(TX_PORT,0); |
outp(TX_CTR,0); |
do |
{ |
port = inp(RX_PORT); /* read nibble */ |
/* x4321x xx */ |
c = (port >> 3); /* xxxx 4321 */ |
c = c & 0xF; /* 0000 4321 */ |
if(clock() > start + CLK_TIMEOUT*20) return TIMEOUT; |
} while (c != 0); /* test nibble 4321 == 0000 */ |
/* the other also is laying down his TX_PORT (==my RX_PORT...) */ |
rx = FALSE; tx = FALSE; start = clock(); |
do /* try if it also on line... */ |
{ |
if((rx == FALSE) && (ppNRTRxOneByte(&c)==TRUE) && (c == BYTE_CTR)) rx = TRUE; |
if((tx == FALSE) && (ppNRTTxOneByte(BYTE_CTR) == TRUE)) tx = TRUE; |
if(clock() > start + CLK_TIMEOUT*20) return TIMEOUT; |
} while(tx==FALSE || rx==FALSE); |
ppSendER(); |
ppNRTWaitER(); |
return TRUE; |
} |
/shark/tags/rel_0_3/drivers/parport/pppindrv.c |
---|
0,0 → 1,127 |
/* |
* |
* Project: |
* Parallel Port S.Ha.R.K. Project |
* |
* Module: |
* ppPinDrv.c |
* |
* Description: |
* file contents description |
* |
* Coordinators: |
* Giorgio Buttazzo <giorgio@sssup.it> |
* Paolo Gai <pj@gandalf.sssup.it> |
* |
* Authors: |
* Andrea Battistotti <btandrea@libero.it> |
* Armando Leggio <a_leggio@hotmail.com> |
* |
* |
* http://www.sssup.it |
* http://retis.sssup.it |
* http://shark.sssup.it |
* |
*/ |
/******************************************************************************************* |
* Module : ppPinDrv.c |
* Author : Andrea Battistotti , Armando Leggio |
* Description: Set On/Off single pin of LPT1... |
* 2002 @ Pavia - GNU Copyrights |
*******************************************************************************************/ |
/* |
* Copyright (C) 2002 Andrea Battistotti , Armando Leggio |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
* |
* CVS : $Id: pppindrv.c,v 1.1 2002-10-28 08:03:55 pj Exp $ |
*/ |
/******************************************************************************************* |
* A standard PC provides for three printer ports, at the following base addresses: |
* |
* LPT1 = 0x0378 or 0x03BC |
* LPT2 = 0x0278 or 0x0378 |
* LPT3 = 0x0278 |
* |
* This module assumes that LPT1 is at 0x0378. |
* |
* The printer port has three 8-bit registers: |
* |
* Data Register (base + 0) ........ outputs |
* |
* 7 6 5 4 3 2 1 0 |
* . . . . . . . * D0 ........... (pin 2), 1=High, 0=Low (true) |
* . . . . . . * . D1 ........... (pin 3), 1=High, 0=Low (true) |
* . . . . . * . . D2 ........... (pin 4), 1=High, 0=Low (true) |
* . . . . * . . . D3 ........... (pin 5), 1=High, 0=Low (true) |
* . . . * . . . . D4 ........... (pin 6), 1=High, 0=Low (true) |
* . . * . . . . . D5 ........... (pin 7), 1=High, 0=Low (true) |
* . * . . . . . . D6 ........... (pin 8), 1=High, 0=Low (true) |
* * . . . . . . . D7 ........... (pin 9), 1=High, 0=Low (true) |
* |
* Status Register (base + 1) ...... inputs |
* |
* 7 6 5 4 3 2 1 0 |
* . . . . . * * * Undefined |
* . . . . * . . . Error ........ (pin 15), high=1, low=0 (true) |
* . . . * . . . . Selected ..... (pin 13), high=1, low=0 (true) |
* . . * . . . . . No paper ..... (pin 12), high=1, low=0 (true) |
* . * . . . . . . Ack .......... (pin 10), high=1, low=0 (true) |
* * . . . . . . . Busy ......... (pin 11), high=0, low=1 (inverted) |
* |
* Control Register (base + 2) ..... outputs |
* |
* 7 6 5 4 3 2 1 0 |
* . . . . . . . * Strobe ....... (pin 1), 1=low, 0=high (inverted) |
* . . . . . . * . Auto Feed .... (pin 14), 1=low, 0=high (inverted) |
* . . . . . * . . Initialize ... (pin 16), 1=high, 0=low (true) |
* . . . . * . . . Select ....... (pin 17), 1=low, 0=high (inverted) |
* * * * * . . . . Unused |
* |
* Pins 18-25 are ground. |
********************************************************************************************/ |
#include <drivers/parport.h> |
void ppSetDataPin(int state, PIN_MASK pin) |
{ |
BYTE port; |
port=ppReadDataByte(); |
switch (state) |
{ |
case PIN_OFF: port &= ~pin; |
case PIN_ON: port |= pin; |
} |
ppSetDataByte(port); |
} |
void ppSetCtrlPin(int state, PIN_MASK pin) |
{ |
BYTE port; |
port=ppReadCtrlByte(); |
switch (state) |
{ |
case PIN_OFF: port &= ~pin; |
case PIN_ON: port |= pin; |
} |
ppSetCtrlByte(port); |
} |
/shark/tags/rel_0_3/drivers/parport/ppdrv.c |
---|
0,0 → 1,543 |
/* |
* Project: |
* Parallel Port S.Ha.R.K. Project |
* |
* Module: |
* ppDrv.c |
* |
* Description: |
* file contents description |
* |
* Coordinators: |
* Giorgio Buttazzo <giorgio@sssup.it> |
* Paolo Gai <pj@gandalf.sssup.it> |
* |
* Authors: |
* Andrea Battistotti <btandrea@libero.it> |
* Armando Leggio <a_leggio@hotmail.com> |
* |
* |
* http://www.sssup.it |
* http://retis.sssup.it |
* http://shark.sssup.it |
* |
*/ |
/********************************************************************************************************** |
* Module : ppDrv.c Author : Andrea Battistotti , Armando Leggio |
* Description: Tranfer Byte via LPT1 laplink cable... 2002 @ Pavia - |
* GNU Copyrights */ |
/* |
* Copyright (C) 2002 Andrea Battistotti , Armando Leggio |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
* |
* CVS : $Id: ppdrv.c,v 1.1 2002-10-28 08:03:54 pj Exp $ |
*/ |
#include <drivers/parport.h> |
/* internal */ |
BYTE RxBuf[PP_BUF_LEN]; /* Received bytes buffer */ |
BYTE TxBuf[PP_BUF_LEN]; /* To transmit bytes buffer */ |
unsigned int nextByteReadyToRx; /* pointer to first byte to read by ppOneByteRx(BYTE *c) ... */ |
unsigned int nextByteReadyToTx; /* when ppbTransmit had to send a byte, send this...*/ |
unsigned int nextByteFreeRx; /* when polling complete reading a byte, will save it in this loc...*/ |
unsigned int nextByteFreeTx; /* when ppbTransmit had to write a byte, write it here...*/ |
/* define inline macro */ |
/* these are for cicle buffer mamagement */ |
#define RxPointerGap (nextByteFreeRx-nextByteReadyToRx) |
#define bytesReadyToRx ((RxPointerGap)>=0? (RxPointerGap):PP_BUF_LEN+(RxPointerGap)) |
#define TxPointerGap (nextByteFreeTx-nextByteReadyToTx) |
#define bytesReadyToTx ((TxPointerGap)>=0? (TxPointerGap):PP_BUF_LEN+(TxPointerGap)) |
#define freeBytesInRxBuffer (PP_BUF_LEN-bytesReadyToRx) |
#define freeBytesInTxBuffer (PP_BUF_LEN-bytesReadyToTx) |
/* for pp sys msg */ |
char SysMsg[SYS_MSG_COLS+1][SYS_MSG_LINS+1]; /* space for sys msgs... */ |
char bufMsg[SYS_MSG_COLS+1]; /* to build msgs... */ |
unsigned int nextMsgToRead; |
unsigned int nextMsgFreeToWrite; |
/* define inline macro */ |
/* these are for cicle buffer mamagement */ |
#define msgPointerGap (nextMsgFreeToWrite-nextMsgToRead) |
#define msgReadyToRead ((msgPointerGap)>=0? (msgPointerGap):SYS_MSG_LINS+(msgPointerGap)) |
#define freeMsgInBuffer (SYS_MSG_LINS-msgReadyToRead) |
/* status ... */ |
enum ppReadingAvailableStates |
{ |
ppNoAllowReading, /* writing is on...*/ |
ppNoReading, |
ppWaitingDR_Nibble1, |
ppWaitingRTS_Nibble2, |
ppWaitingDR_Nibble2, |
}; |
enum ppWritingAvailableStates |
{ |
ppNoAllowWriting, /* reading is on...*/ |
ppNoWriting, |
ppWaitingOTS_Nibble1, |
ppWaitingER_Nibble1, |
ppWaitingOTS_Nibble2, |
ppWaitingER_Nibble2 |
}; |
int ppStatusReading; |
int ppStatusWriting; |
BYTE ppReceivingByte; |
BYTE ppTransmittingByte; |
#if PP_STATS == 1 |
/* for internal statistic ...if activate...*/ |
long statReading[ppWaitingDR_Nibble2+1]; |
long statWriting[ppWaitingER_Nibble2+1]; |
#endif |
/*********************************************/ |
/* sys msg managment */ |
/*********************************************/ |
int ppReadSysMsg(char * buf) |
{ |
if (!msgReadyToRead) /* there is nothing to read...*/ |
{ |
return (PP_NOSYSMSG_EXC); |
} |
else |
{int i=0; |
while (i<SYS_MSG_COLS && SysMsg[nextMsgToRead][i]) /* !='\0'...*/ |
*buf++=SysMsg[nextMsgToRead][i++]; /* read char */ |
*buf='\0'; |
nextMsgToRead=++nextMsgToRead%SYS_MSG_LINS; /* circular buffer increment */ |
return (PP_SYSMSG_OK); |
} |
} |
int ppWriteSysMsg(char * buf, ...) |
{ |
char * pbufMsg=bufMsg; |
va_list args; |
bufMsg[0]='\0'; |
va_start(args, buf); |
vsprintf(bufMsg,buf,args); /* Not garatee msg len... */ |
va_end(args); |
if (freeMsgInBuffer < 1) |
{ |
return (PP_NOFREEMSG_EXC); |
} |
else |
{int i=0; |
while ((i<SYS_MSG_COLS) && (*pbufMsg) ) /* !='\0'...*/ |
SysMsg[nextMsgFreeToWrite][i++]=*pbufMsg++; |
SysMsg[nextMsgFreeToWrite][i-1]='\n'; |
SysMsg[nextMsgFreeToWrite][i]='\0'; |
nextMsgFreeToWrite=++nextMsgFreeToWrite%SYS_MSG_LINS; /* circular buffer pointer increment */ |
return (PP_SYSMSG_OK); |
} |
} |
/******************************************/ |
/* Inizialization: this is NRT task... */ |
/******************************************/ |
void ppInitDrv(void (*pf)(char *)) |
{ |
/* set to zero all pointer & buffer & status... */ |
while(ppNRTOpenComm()!=TRUE) // not real-time.... |
{ |
if (pf!=NULL) (*pf)("Waiting Open Communcation...\n"); |
ppSendER(); |
} |
if (pf!=NULL) (*pf)("Open Communcation OK!\n"); |
} |
/******************************************/ |
/* TX RX funtions... */ |
/******************************************/ |
int ppRxOneByte(BYTE *c) |
{ |
if (!bytesReadyToRx) /* there is nothing to read...*/ |
{ |
return (PP_COMM_NOREADYBYTES_EXC); |
} |
else |
{ |
*c=RxBuf[nextByteReadyToRx]; /* read byte */ |
nextByteReadyToRx=++nextByteReadyToRx%PP_BUF_LEN; /* circular buffer increment */ |
return (PP_COMM_OK); |
} |
} |
int ppTxOneByte(BYTE c) |
{ |
if (freeBytesInTxBuffer < 1) |
{ |
return (PP_COMM_NOFREEBYTES_EXC); |
} |
else |
{ |
TxBuf[nextByteFreeTx]=c; |
nextByteFreeTx=++nextByteFreeTx%PP_BUF_LEN; /* circular buffer pointer increment */ |
return (PP_COMM_OK); |
} |
} |
int ppTxBytes(BYTE * c, unsigned int nbyte) |
{ |
if (freeBytesInTxBuffer<nbyte) /* if there are less than nbyte return nothing...*/ |
{ |
return (PP_COMM_NOFREEBYTES_EXC); |
} |
else |
{ unsigned int i; |
for (i=0;i<nbyte;i++) |
{ |
TxBuf[nextByteFreeTx]=*c++; |
nextByteFreeTx=++nextByteFreeTx%PP_BUF_LEN; /* circular buffer pointer increment */ |
} |
return (PP_COMM_OK); |
} |
} |
int ppRxBytes(BYTE * c, unsigned int nbyte) |
{ |
if (bytesReadyToRx<nbyte) /* if there are less than nbyte return nothing...*/ |
{ |
return (PP_COMM_NOREADYBYTES_EXC); |
} |
else |
{ unsigned int i; |
for (i=0;i<nbyte;i++) |
{ |
*c++=RxBuf[nextByteReadyToRx]; /* read byte */ |
#if PP_DEBUG == 1 |
ppWriteSysMsg("Received value: %i %i of %i \n",RxBuf[nextByteReadyToRx],i,nbyte); |
#endif |
nextByteReadyToRx=++nextByteReadyToRx%PP_BUF_LEN; /* circular buffer increment */ |
} |
return (PP_COMM_OK); |
} |
} |
/* polling server ... */ |
TASK ppPollingSvr(void *arg) |
{ |
BYTE port; |
nextByteReadyToRx=0; |
nextByteReadyToTx=0; |
nextByteFreeRx=0; |
nextByteFreeTx=0; |
nextMsgToRead=0; |
nextMsgFreeToWrite=0; |
ppStatusReading=ppNoReading; |
ppStatusWriting=ppNoWriting; |
ppWriteSysMsg("Polling Server started...\n"); |
task_endcycle(); |
while (1) |
{ |
/* case ppReading: read ... */ |
switch (ppStatusReading) |
{ |
case ppNoAllowReading: break; |
case ppNoReading: |
ppStatusWriting=ppNoWriting; |
if(!ppIsRTS()) break; |
ppSendOTS(); /* Set Ok To Send - the other one can send... */ |
#if PP_DEBUG == 1 |
ppWriteSysMsg(" %i : Received RTS...\n", ppStatusReading); |
#endif |
ppStatusWriting=ppNoAllowWriting; |
ppStatusReading=ppWaitingDR_Nibble1; |
case ppWaitingDR_Nibble1: |
#if PP_STATS == 1 /* for internal statistic ...*/ |
statReading[ppStatusReading]++; |
#endif |
#if PP_DEBUG == 1 |
ppWriteSysMsg("Send OTS\n"); |
ppWriteSysMsg("Waiting DR Nibble1\n"); |
#endif |
if(!ppIsDR()) break; /* data no ready: read it next period...*/ |
port = inp(RX_PORT); /* read nibble */ |
ppSendER(); /* send a End Read */ |
ppReceivingByte = (port >> 3); /* read LSN */ |
ppStatusReading=ppWaitingRTS_Nibble2; |
case ppWaitingRTS_Nibble2: |
#if PP_STATS == 1 /* for internal statistic ...*/ |
statReading[ppStatusReading]++; |
#endif |
#if PP_DEBUG == 1 |
ppWriteSysMsg("Received DR Nibble1\n"); |
ppWriteSysMsg("Send ER Nibble1\n"); |
ppWriteSysMsg("Waiting RTS Nibble2\n"); |
#endif |
if(!ppIsRTS()) break; /* */ |
ppSendOTS(); /* Ok To Send - the other one can send... */ |
ppStatusReading=ppWaitingDR_Nibble2; |
case ppWaitingDR_Nibble2: |
#if PP_STATS == 1 /* for internal statistic ...*/ |
statReading[ppStatusReading]++; |
#endif |
#if PP_DEBUG == 1 |
ppWriteSysMsg("Received RTS Nibble2\n"); |
ppWriteSysMsg("Send OTS Nibble2\n"); |
ppWriteSysMsg("Waiting DR Nibble2\n"); |
#endif |
if(!ppIsDR()) break; |
port = inp(RX_PORT); /* read nibble */ |
ppSendER(); /* send a End Read */ |
#if PP_DEBUG == 1 |
ppWriteSysMsg("Received DR Nibble2\n"); |
ppWriteSysMsg("Read Nibble2\n"); |
ppWriteSysMsg("Send ER Nibble2\n"); |
#endif |
ppReceivingByte = (ppReceivingByte & ~MSN); /* set to zero c MSN */ |
ppReceivingByte = (ppReceivingByte | ((port >> 3) << 4)); /* read MSN */ |
/* here is possible insert some ctrl ... */ |
/* byte is ok, so now make it available to ppRxOneByte() */ |
RxBuf[nextByteFreeRx]=ppReceivingByte; |
nextByteFreeRx=++nextByteFreeRx%PP_BUF_LEN; /* circular buffer pointer increment */ |
#if PP_STATS == 1 |
ppWriteSysMsg("Trasmission :\n"); |
ppWriteSysMsg("W_DR_1 : %d\n",statReading[ppWaitingDR_Nibble1]); |
ppWriteSysMsg("W_RTS_2 : %d\n",statReading[ppWaitingRTS_Nibble2]); |
ppWriteSysMsg("W_DR_2 : %d\n",statReading[ppWaitingDR_Nibble2]); |
ppWriteSysMsg("Received byte : %i\n",ppReceivingByte); |
statReading[ppWaitingDR_Nibble1]=0; |
statReading[ppWaitingRTS_Nibble2]=0; |
statReading[ppWaitingDR_Nibble2]=0; |
#endif |
/* end reading so reset status... */ |
ppStatusReading=ppNoReading; |
//ppStatusWriting=ppNoWriting; |
break; |
} |
/* case Writing: can only if this cycle not is reading... */ |
switch (ppStatusWriting) |
{ |
case ppNoAllowWriting: break; |
case ppNoWriting: |
ppStatusReading=ppNoReading; |
if(!bytesReadyToTx) |
{ |
break; |
#if PP_DEBUG == 1 |
ppWriteSysMsg("Writin break\n"); |
#endif |
} |
else |
{ |
#if PP_DEBUG == 1 |
ppWriteSysMsg("NO Writin break\n"); |
ppWriteSysMsg("TX Gap: %i \n",TxPointerGap); |
ppWriteSysMsg("nextByteFreeTx: %i\n",nextByteFreeTx); |
ppWriteSysMsg("nextByteReadyToTx: %i\n",nextByteReadyToTx); |
#endif |
} |
ppSendRTS(); /* Set On RequestToSend bit */ |
ppTransmittingByte=TxBuf[nextByteReadyToTx]; |
#if PP_DEBUG == 1 |
ppWriteSysMsg("pllsvr: ppTransmittingByte : %i %c \n",ppTransmittingByte,ppTransmittingByte); |
#endif |
port = inp(TX_PORT) & (~TX_DATA); /* set to zero trasmission bits */ |
port = port | (ppTransmittingByte & LSN); /* set bits 0..3 with LSN */ |
ppStatusWriting=ppWaitingOTS_Nibble1; |
ppStatusReading=ppNoAllowWriting; |
case ppWaitingOTS_Nibble1: |
#if PP_STATS == 1 |
statWriting[ppStatusWriting]++; |
#endif |
#if PP_DEBUG == 1 |
ppWriteSysMsg(" Send RTS Nibble1\n"); |
ppWriteSysMsg(" Waiting OTS Nibble1\n"); |
#endif |
if(!ppIsOTS()) break; |
outp(TX_PORT,port); /* send nibble 1 */ |
ppSendDR(); /* set on Data Ready bit */ |
ppStatusWriting=ppWaitingER_Nibble1; |
case ppWaitingER_Nibble1: |
#if PP_STATS == 1 |
statWriting[ppStatusWriting]++; |
#endif |
#if PP_DEBUG == 1 |
ppWriteSysMsg(" Send Nibble1\n"); |
ppWriteSysMsg(" Send DR Nibble1\n"); |
ppWriteSysMsg(" Waiting ER Nibble1\n"); |
#endif |
if(!ppIsER()) break; |
ppSendRTS(); /* send trasmission request */ |
port = inp(TX_PORT) & (~TX_DATA); /* set to zero bit trasmission bits */ |
port = port | (ppTransmittingByte >> 4); /* set bits 0..3 with MSN */ |
ppStatusWriting=ppWaitingOTS_Nibble2; |
case ppWaitingOTS_Nibble2: |
#if PP_STATS == 1 |
statWriting[ppStatusWriting]++; |
#endif |
#if PP_DEBUG == 1 |
ppWriteSysMsg(" Received ER Nibble1\n"); |
ppWriteSysMsg(" Send RTS Nibble2\n"); |
ppWriteSysMsg(" Waiting OTS Nibble2\n"); |
#endif |
if(!ppIsOTS()) break; |
outp(TX_PORT,port); /* send nibble 2 */ |
ppSendDR(); /* set on Data Ready bit */ |
ppStatusWriting=ppWaitingER_Nibble2; |
case ppWaitingER_Nibble2: |
#if PP_STATS == 1 |
statWriting[ppStatusWriting]++; |
#endif |
#if PP_DEBUG == 1 |
ppWriteSysMsg(" Received OTS Nibble2\n"); |
ppWriteSysMsg(" Write Nibble2\n"); |
ppWriteSysMsg(" Send DR Nibble2\n"); |
ppWriteSysMsg(" Waiting ER Nibble2\n"); |
#endif |
if(!ppIsER()) break; |
/* byte is ok, so move pointer to next byte to be send... */ |
nextByteReadyToTx=++nextByteReadyToTx%PP_BUF_LEN; /* circular buffer pointer increment */ |
#if PP_STATS == 1 |
ppWriteSysMsg("Reception :\n"); |
ppWriteSysMsg("W_OTS_1 : %ld\n",statWriting[ppWaitingOTS_Nibble1]); |
ppWriteSysMsg("W_ER_2 : %ld\n",statWriting[ppWaitingER_Nibble2]); |
ppWriteSysMsg("W_OTS_2 : %ld\n",statWriting[ppWaitingOTS_Nibble2]); |
statWriting[ppWaitingOTS_Nibble1]=0; |
statWriting[ppWaitingER_Nibble2]=0; |
statWriting[ppWaitingOTS_Nibble2]=0; |
#endif |
/* end writing. so reset status... */ |
ppStatusReading=ppNoReading; |
ppStatusWriting=ppNoWriting; |
} |
task_endcycle(); |
} |
return (0); |
} |
/shark/tags/rel_0_3/drivers/parport/makefile |
---|
0,0 → 1,15 |
# The Parallel Port Library, by Andrea Battistotti & Armando Leggio |
ifndef BASE |
BASE=../.. |
endif |
include $(BASE)/config/config.mk |
LIBRARY = pport |
OBJS_PATH = $(BASE)/drivers/parport |
OBJS = ppdrv.o ppnrtdrv.o pppindrv.o |
include $(BASE)/config/lib.mk |
/shark/tags/rel_0_3/drivers/pxc/pxc.c |
---|
261,7 → 261,7 |
proc_table[p].frozen_activations++; |
else { |
l = proc_table[p].task_level; |
level_table[l]->task_activate(l,p); |
level_table[l]->public_activate(l,p); |
event_need_reschedule(); |
} |
/shark/tags/rel_0_3/drivers/pci/pci_scan.c |
---|
3,17 → 3,15 |
#include <ll/i386/hw-arch.h> |
#include <ll/i386/hw-io.h> |
#include <ll/i386/cons.h> |
#include <ll/i386/error.h> |
#include <ll/i386/mem.h> |
#include <ll/stdlib.h> |
#include <drivers/llpci.h> |
#include <drivers/pci.h> |
#include <drivers/linuxpci.h> |
#include <kernel/log.h> |
//#define DEBUG_PCISCAN |
static struct pci_dev pci_devs[N_MAX_DEVS]; |
static struct pci_bus pci_root; |
146,7 → 144,9 |
*/ |
child = kmalloc(sizeof(*child), GFP_ATOMIC); |
if(child==NULL) { |
error(KERN_ERR "pci: out of memory for bridge.\n"); |
#ifdef DEBUG_PCISCAN |
printk(KERN_ERR "pci: out of memory for bridge.\n"); |
#endif |
continue; |
} |
memset(child, 0, sizeof(*child)); |
221,11 → 221,15 |
pcibios_init(); |
if (!pci_present()) { |
error("PCI: No PCI bus detected\n"); |
#ifdef DEBUG_PCISCAN |
printk("PCI: No PCI bus detected\n"); |
#endif |
return; |
} |
error("PCI: Probing PCI hardware\n"); |
#ifdef DEBUG_PCISCAN |
printk("PCI: Probing PCI hardware\n"); |
#endif |
memset(&pci_root, 0, sizeof(pci_root)); |
pci_root.subordinate = pci_scan_bus(&pci_root); |
} |
/shark/tags/rel_0_3/drivers/pci/pci.c |
---|
3,15 → 3,13 |
#include <ll/i386/hw-arch.h> |
#include <ll/i386/hw-io.h> |
#include <ll/i386/cons.h> |
#include <ll/stdlib.h> |
#include <drivers/llpci.h> |
#include <drivers/pci.h> |
#include <drivers/linuxpci.h> |
#include <kernel/log.h> |
static int ndev = 0; |
static struct pci_des pci_devs[N_MAX_DEVS]; |
108,7 → 106,10 |
{ |
if (pci_class(class_code, index, bus, dev) != NULL) { |
cprintf("PCIBIOS_FIND_CLASS: found at bus %d, dev %d\n", *bus, *dev); |
#ifdef DEBUG_PCI |
printk(KERN_DEBUG "PCIBIOS_FIND_CLASS:" |
"found at bus %d, dev %d\n", *bus, *dev); |
#endif |
return PCIBIOS_SUCCESSFUL; |
} else { |
return PCIBIOS_DEVICE_NOT_FOUND; |
122,14 → 123,15 |
int i; |
struct pci_regs *r; |
cprintf(" DevLib PCI support\n\n"); |
cprintf(" PCI config type %d\n", pcibios_present()); |
cprintf(" %d PCI devices found:\n\n", ndev); |
printk(KERN_INFO "DevLib PCI support\n\n"); |
printk(KERN_INFO "PCI config type %d\n", pcibios_present()); |
printk(KERN_INFO "%d PCI devices found:\n\n", ndev); |
for(i = 0; i < ndev; i++) { |
cprintf(" %d: bus %d dev %d\n",i , pci_devs[i].bus, pci_devs[i].dev); |
r = (struct pci_regs *) pci_devs[i].mem; |
cprintf(" Vendor: %s", pci_strvendor(r->VendorId)); |
cprintf(" Class: %s\n", pci_strclass(r->ClassCode << 8)); |
printk(KERN_INFO "%d: bus %d dev %d\n", |
i, pci_devs[i].bus, pci_devs[i].dev); |
printk(KERN_INFO "Vendor: %s", pci_strvendor(r->VendorId)); |
printk(KERN_INFO "Class: %s\n", pci_strclass(r->ClassCode << 8)); |
} |
} |
/shark/tags/rel_0_3/drivers/char/scom.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: scom.c,v 1.1.1.1 2002-03-29 14:12:49 pj Exp $ |
CVS : $Id: scom.c,v 1.2 2003-01-07 17:14:05 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:49 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2003-01-07 17:14:05 $ |
------------ |
Author: Massimiliano Giorgi |
482,7 → 482,6 |
task_activate(p3); |
task_endcycle(); |
sys_end(); |
sys_status(NORM_STATUS); |
#ifdef __DEBUG_SERIAL__ |
cprintf("RxServer was activated %d times\n",rx_time); |
cprintf("TxServer was activated %d times\n",tx_time); |
/shark/tags/rel_0_3/drivers/char/sermouse.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: sermouse.c,v 1.1.1.1 2002-03-29 14:12:49 pj Exp $ |
CVS : $Id: sermouse.c,v 1.2 2002-11-11 08:41:31 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:49 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2002-11-11 08:41:31 $ |
------------ |
Author: Gerardo Lamastra |
84,6 → 84,7 |
//#include <cons.h> |
#include <kernel/kern.h> |
#include <time.h> |
//#include "sys/sys.h" |
//#include "vm.h" |
//#include "kern.h" |
405,15 → 406,19 |
int port; |
int ret; |
int found; |
struct timespec delay; |
delay.tv_sec = 0; |
delay.tv_nsec = 500000000; |
found=0; |
for (port=COM1;port<=COM4;port++) { |
ret=com_open(port,1200,NONE,7,1); |
if (ret==1) { |
com_write(port,MCR,0x0e); |
task_delay(500000l); /* necessary? */ |
nanosleep(&delay,NULL); /* necessary? */ |
com_write(port,MCR,0x0f); |
task_delay(500000l); /* necessary? */ |
nanosleep(&delay,NULL); /* necessary? */ |
ret=sem_wait(&com_link[mouse_port].rx_sem); |
if (ret==TRUE) { |
if (*(com_link[mouse_port].rx_buf)=='M') found=1; |
/shark/tags/rel_0_3/drivers/char/rtc.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: rtc.c,v 1.1.1.1 2002-03-29 14:12:49 pj Exp $ |
CVS : $Id: rtc.c,v 1.2 2002-11-11 08:41:31 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:49 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2002-11-11 08:41:31 $ |
------------ |
Author: Massimiliano Giorgi |
183,7 → 183,7 |
SYS_FLAGS flags; |
unsigned char ctrl; |
unsigned retries=0; |
unsigned delay; |
struct timespec delay; |
/* |
* read RTC once any update in progress is done. The update |
201,8 → 201,9 |
barrier(); |
*/ |
delay=1000; |
while (rtc_is_updating()&&++retries<=5) task_delay(delay); |
delay.tv_nsec = 1000000; |
delay.tv_sec = 0; |
while (rtc_is_updating()&&++retries<=5) nanosleep(&delay, NULL); |
if (retries>5) return -1; |
/* |
/shark/tags/rel_0_3/drivers/char/8042.c |
---|
20,11 → 20,11 |
/** |
------------ |
CVS : $Id: 8042.c,v 1.1.1.1 2002-03-29 14:12:49 pj Exp $ |
CVS : $Id: 8042.c,v 1.2 2002-11-11 08:41:31 pj Exp $ |
File: $File$ |
Revision: $Revision: 1.1.1.1 $ |
Last update: $Date: 2002-03-29 14:12:49 $ |
Revision: $Revision: 1.2 $ |
Last update: $Date: 2002-11-11 08:41:31 $ |
------------ |
8042.h |
348,7 → 348,7 |
static int C8042_reset(void) |
{ |
int c; |
int c=0; |
int retries=16; |
trace("8042 reset START"); |
/shark/tags/rel_0_3/drivers/makefile |
---|
1,5 → 1,5 |
dirs := $(filter-out CVS makefile, $(wildcard *)) |
dirs := $(filter-out CVS cvs makefile, $(wildcard *)) |
p_all := $(addprefix prefixall_, $(dirs)) |
p_install := $(addprefix prefixinstall_, $(dirs)) |
p_clean := $(addprefix prefixclean_, $(dirs)) |
/shark/tags/rel_0_3/drivers/linuxcom/include/linux/netdevice.h |
---|
8,12 → 8,21 |
#include <linux/skbuff.h> |
#include <linux/notifier.h> |
#include <time.h> |
// for 3c59x.c (!!!) |
#define le32_to_cpu(val) (val) |
#define cpu_to_le32(val) (val) |
#define test_and_set_bit(val, addr) set_bit(val, addr) |
#define mdelay(x) task_delay((x)*1000) |
static __inline__ void mdelay(int x) |
{ |
struct timespec delay; |
delay.tv_sec=x/1000; |
delay.tv_nsec=(x%1000)*1000000; |
nanosleep(&delay, NULL); |
} |
#define kfree(x) { } |
#define ioremap(a,b) \ |
(((a)<0x100000) ? (void *)((u_long)(a)) : vremap(a,b)) |
/shark/tags/rel_0_3/drivers/linuxcom/include/linux/compatib.h |
---|
56,7 → 56,7 |
/* Linux kernel call emulation */ |
#define kmalloc(a,b) malloc(a) |
#define printk cprintf |
//#define printk cprintf I would like to use the kernel printk if possible... |
#define check_region(a,b) 0 |
#define request_region(a,b,c) |
/shark/tags/rel_0_3/drivers/linuxcom/auto_irq.c |
---|
1,5 → 1,6 |
#include<asm/bitops.h> |
#include<kernel/kern.h> |
#include <time.h> |
struct device *irq2dev_map[16] = {0, 0, /* ... zeroed */}; |
17,6 → 18,7 |
int autoirq_setup(int waittime) |
{ |
int i; |
struct timespec delay; |
handled = 0; |
27,7 → 29,9 |
} |
/* Hang out at least <waittime> jiffies waiting for bogus IRQ hits. */ |
task_delay(waittime); |
delay.tv_sec = waittime/1000000; |
delay.tv_nsec = (waittime%1000000)*1000; |
nanosleep(&delay, NULL); |
return handled; |
} |
34,10 → 38,13 |
int autoirq_report(int waittime) |
{ |
struct timespec delay; |
int i; |
/* Hang out at least <waittime> jiffies waiting for the IRQ. */ |
task_delay(waittime); |
delay.tv_sec=waittime/1000000; |
delay.tv_nsec=(waittime%1000000)*1000; |
nanosleep(&delay, NULL); |
/* Retract the irq handlers that we installed. */ |
for (i = 0; i < 16; i++) { |