/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors     :
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
 *   Luca Abeni          <luca@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: slsh.c,v 1.5 2003-12-17 13:52:47 giacomo Exp $

 File:        $File$
 Revision:    $Revision: 1.5 $
 Last update: $Date: 2003-12-17 13:52:47 $
 ------------

 This file contains the scheduling module for Slot-Shifting.

 Read slsh.h for further details.

**/

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include "slsh.h"
#include <ll/stdio.h>
#include <ll/stdlib.h>
#include <ll/string.h>
#include <ll/math.h>    /* for ceil(...) */
#include <ll/ll.h>      /* for memcpy(...) */
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>

//#define eslsh_printf kern_printf
#define slsh_printf printk

/* Keeps information about static and guaranteed tasks */
typedef struct {
        int est;
        int dabs;
        int interval;
} SLSH_task;

/*+ Status used in the level +*/
#define SLSH_READY                      MODULE_STATUS_BASE
#define SLSH_WAIT                       MODULE_STATUS_BASE + 1
#define SLSH_IDLE                       MODULE_STATUS_BASE + 2
#define SLSH_WCET_VIOLATED      MODULE_STATUS_BASE + 3

/*+ defines +*/
#define MAX_INTERVALS 1000              /* 1000 intervals is max, for now */
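
/*
 * For reference, a minimal sketch of the interval descriptor that this
 * module expects from slsh.h (field names are taken from the uses below;
 * the declaration in slsh.h is the authoritative one):
 *
 *   typedef struct {
 *       int start;   // first slot of the interval
 *       int end;     // first slot after the interval
 *       int length;  // end - start, in slots
 *       int maxt;    // slots already reserved by the offline (static) tasks
 *       int sc;      // spare capacity, length - maxt
 *   } SLSH_interval;
 */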

/******+ the level redefinition for the SLOT SHIFT level +*******/
typedef struct {
        level_des l;                    /*+ the standard level descriptor+*/

        /* task lists */
        SLSH_task tasks[MAX_PROC];  /* est and dl's for static and guaranteed tasks */

        IQUEUE idle_statics; /* finished static tasks */

        IQUEUE unspecified;     /* tasks with only a wcet */

        /* the Intervals list */
        SLSH_interval intervals[MAX_INTERVALS];
        int current;                    /* current interval */
        int last;                       /* last interval */

        int slot;                       /* slot shifting time */
        TIME slot_length;       /* slot length in real system time */
        int LCM;                        /* length (in slots) of the offline schedule */

        int slot_event;         /* save the event */
} SLSH_level_des;


/* check if some tasks are ready, return 0 if ready, -1 otherwise */
static int SLSH_R(SLSH_task* tasks)
{
        int s;

        /* for all static tasks */
        for(s = 0; tasks[s].est != -1; ++s)
        {
           if(proc_table[s].status == SLSH_READY)
               return 0;
        }
        return -1;
}

/* check if unspecified exists, return 0 if it exists, -1 otherwise */
static int SLSH_T(IQUEUE *unspecified)
{
        if(!iq_isempty(unspecified))
                return 0;
        else
                return -1;
}

/* return the sc in an interval */
static int SLSH_sc(SLSH_interval* intervals, int i)
{
        return intervals[i].sc;
}

/* return a static task from the current interval or a guaranteed task */
static PID SLSH_staticOrGuaranteed(SLSH_level_des* lev)
{
        int lowest_dl = 0;      /* lowest dl found so far */
        PID pid = NIL;          /* static or guaranteed task */
        int t;

        /* Decide according to EDF, go through all static & guaranteed tasks */
        for(t = 0; t < MAX_PROC; ++t)
        {
                /* static tasks */
                if(proc_table[t].pclass == STATIC_PCLASS)
                {
                        /* static task must belong to current interval */
                        if(lev->tasks[t].interval == lev->current)
                        {
                                /* only ready tasks */
                                if(proc_table[t].status == SLSH_READY)
                                {
                                        /* a new lower dl was found */
                                        if(pid == NIL || lev->tasks[t].dabs < lowest_dl)
                                        {
                                                lowest_dl = lev->tasks[t].dabs;
                                                pid = t;
                                        }
                                }
                        }
                } /* guaranteed tasks */
                else if(proc_table[t].pclass == HARD_PCLASS)
                {
                        /* only ready tasks */
                        if(proc_table[t].status == SLSH_READY)
                        {
                                /* a new lower dl was found */
                                if(pid == NIL || lev->tasks[t].dabs < lowest_dl)
                                {
                                        lowest_dl = lev->tasks[t].dabs;
                                        pid = t;
                                }
                        }
                }
        }/* for all tasks */

        return pid;
}

/* return a static task among the candidates, all ready statics */
static PID SLSH_candidates(SLSH_task* tasks)
{
        int lowest_dl = 0;
        PID pid = NIL;
        int t;

        /* Use the EDF algorithm again to decide which task to run */
        for(t = 0; t < MAX_PROC; ++t)
        {
                /* only static tasks */
                if(proc_table[t].pclass == STATIC_PCLASS)
                {
                        /* only ready tasks */
                        if(proc_table[t].status == SLSH_READY)
                        {
                                /* a new lower dl was found */
                                if(pid == NIL || tasks[t].dabs < lowest_dl)
                                {
                                        lowest_dl = tasks[t].dabs;
                                        pid = t;
                                }
                        }/* all ready tasks */
                }/* all static tasks */
        }/* for all tasks */

        return pid;
}

/* decrease the sc in an interval by amount */
void SLSH_decSc(SLSH_interval* intervals, int i, int amount)
{
        intervals[i].sc -= amount;
}

/* increase the sc in an interval by amount */
void SLSH_incSc(SLSH_interval* intervals, int i, int amount)
{
        intervals[i].sc += amount;
}

/* swap the sc between intervals, also consider intervals with negative sc */
void SLSH_swapSc(SLSH_interval* intervals, int current, int task_interval)
{
        /* decrease the sc in the current interval */
        SLSH_decSc(intervals, current, 1);

        /* update the other interval(s) */
        if(intervals[task_interval].sc < 0)     /* negative sc */
        {
                /* special case, increase next interval sc by 1 and also current interval (borrowing) */
                if(task_interval == current + 1)
                {
                        SLSH_incSc(intervals, task_interval, 1);
                        SLSH_incSc(intervals, current, 1);
                }
                else /* increase every interval sc that is negative between current and task_interval */
                {
                        while(task_interval > current && intervals[task_interval].sc < 0)
                        {
                                SLSH_incSc(intervals, task_interval, 1);
                                task_interval--;
                        }
                }
        }
        else /* ordinary swapping */
                SLSH_incSc(intervals, task_interval, 1);
}
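
/*
 * Worked example (illustrative only, values are made up): with
 * intervals[2].sc == 3 and intervals[4].sc == 1, the call
 * SLSH_swapSc(intervals, 2, 4) performs an ordinary swap: one spare slot
 * is taken from the current interval 2 and given to interval 4, so the
 * spare capacities become 2 and 2.  If intervals[4].sc had been negative,
 * the function would instead walk backwards from interval 4 towards
 * interval 2, adding one slot to each interval whose sc is still negative
 * (or, when task_interval == current + 1, give the slot back to both
 * intervals).
 */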

/* The scheduler, decides which task to run. */
static PID SLSH_public_scheduler(LEVEL l)
{
        SLSH_level_des* lev = (SLSH_level_des *)(level_table[l]);
        PID pid;

        /* The scheduler chooses among static, guaranteed (hard aperiodic) and
             unspecified (soft aperiodic) tasks */
        /* no ready tasks and no sc, execute the idle task */
        if(SLSH_R(lev->tasks) == 0 && SLSH_sc(lev->intervals, lev->current) == 0)
                return NIL;
        /* must execute a static from the current interval or a guaranteed task */
        else if(SLSH_R(lev->tasks) > 0 && SLSH_sc(lev->intervals, lev->current) == 0)
                return SLSH_staticOrGuaranteed(lev);
        /* sc available... */
        else if(SLSH_R(lev->tasks) > 0 && SLSH_sc(lev->intervals, lev->current) > 0)
        {
                /* If unspecified tasks exist, execute them according to FIFO order */
                if(SLSH_T(&lev->unspecified) == 0)
                {
                        SLSH_decSc(lev->intervals, lev->current, 1);    /* decrease sc by 1 */
                        return iq_getfirst(&lev->unspecified);
                }
                else /* No unspecified, execute task from candidates (statics) */
                {
                        pid = SLSH_candidates(lev->tasks);

                        /* sc needs to be swapped */
                        if(lev->tasks[pid].interval != lev->current)
                                SLSH_swapSc(lev->intervals, lev->tasks[pid].interval, lev->current);

                        return pid;
                }
        }

        kern_printf("(SLSH s)");
        return NIL;
}

/* not used, slot-shifting handles all guarantees itself, it handles all bandwidth */
static int SLSH_public_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
        *freebandwidth = 0;
        return 1;
}

/* get the interval that x is in */
static int SLSH_getInterval(SLSH_interval* intervals, int x, int last)
{
        int i;

        /* search through the intervals  */
        for(i = 0; i <= last; ++i)
        {
                /* x is in the interval whose start is smaller or equal and whose end is bigger */
                if(intervals[i].start <= x && x < intervals[i].end)
                        return i;
        }
        return -1;
}

/* get the start of the interval I */
static int SLSH_intervalStart(SLSH_interval* intervals, int I)
{
        return intervals[I].start;
}
319
/* split interval I into two parts, slow because of copying. OBS!!! no check if there is
320
    enough space in the intervals array */
321
static void SLSH_splitInterval(SLSH_level_des* lev, int I, int dabs)
322
{
323
        SLSH_interval left_interval;
324
        int i;
325
 
326
 
327
        lev->last++;
328
 
329
        /* move every interval above and including I */
330
        for(i = lev->last; i > I; --i)
331
                memcpy(&lev->intervals[i], &lev->intervals[i - 1], sizeof(SLSH_interval));
332
 
333
        /* Left interval start, end and length */
334
        left_interval.start = lev->intervals[I].start;
335
        left_interval.end = dabs;
336
        left_interval.length = left_interval.end - left_interval.start;
337
 
338
        /* Right interval (uses old interval struct) start and length end remains as the old value */
339
        lev->intervals[I + 1].start = dabs;
340
        lev->intervals[I + 1].length = lev->intervals[I + 1].end - lev->intervals[I + 1].start;
341
 
342
        /* check if sc still exists in the right interval */
343
        if(lev->intervals[I + 1].length - lev->intervals[I + 1].maxt > 0)
344
        {
345
                lev->intervals[I + 1].sc = lev->intervals[I + 1].length - lev->intervals[I + 1].maxt;
346
                left_interval.sc = left_interval.length; /* the whole interval is free, for now... */
347
        }
348
        else /* no sc in the right interval */
349
        {
350
                lev->intervals[I + 1].maxt = lev->intervals[I + 1].length;
351
                left_interval.sc = lev->intervals[I + 1].sc; /* all sc in left interval */
352
                lev->intervals[I + 1].sc = 0;
353
        }        
354
 
355
        /* insert the new interval */
356
        memcpy(&lev->intervals[I], &left_interval, sizeof(SLSH_interval));
357
}
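
/*
 * Worked example (illustrative only): splitting an interval with
 * start == 0, end == 10, maxt == 3, sc == 7 at dabs == 6 yields a left
 * interval [0,6) that keeps all the spare capacity it can (sc == 6) and a
 * right interval [6,10) with maxt == 3 and the remaining spare capacity
 * sc == 1; the total spare capacity (7) is preserved across the split.
 */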

/* Reduce the sc from back to front by the wcet amount, interval splitting may be necessary */
static void SLSH_updateSc(SLSH_level_des* lev, HARD_TASK_MODEL* h)
{
        int dabs = ceil((lev->slot + h->drel)/lev->slot_length); /* absolute deadline of request */
        int dabs_interval = SLSH_getInterval(lev->intervals, dabs, lev->last); /* interval where dabs is */
        int C = ceil(h->wcet/lev->slot_length); /* amount of sc to reduce */
        int sc = 0;
        int i;

        /* check if interval splitting is necessary */
        if(lev->intervals[dabs_interval].end != dabs)
                SLSH_splitInterval(lev, dabs_interval, dabs);

        /* decrease sc in all intervals that are necessary, from dabs_interval to current */
        for(i = dabs_interval; i >= lev->current && C > 0; --i)
        {
                if((sc = SLSH_sc(lev->intervals, i)) >= 0) /* only decrease where sc exists */
                {
                        if(sc > C) /* the last sc decrement */
                        {
                                SLSH_decSc(lev->intervals, i, C);
                                C = 0;
                        }
                        else    /* too little sc in this interval, decrease it to 0 */
                        {
                                C -= SLSH_sc(lev->intervals, i);
                                SLSH_decSc(lev->intervals, i, SLSH_sc(lev->intervals, i));
                        }
                }
        }/* for all intervals */
}
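
/*
 * Worked example (illustrative only): with lev->current == 0 and spare
 * capacities sc == {2, 1, 3} for intervals 0..2, reserving C == 4 slots up
 * to a deadline at the end of interval 2 consumes the sc from back to
 * front: interval 2 drops from 3 to 0 (C becomes 1), interval 1 drops from
 * 1 to 0 (C becomes 0) and interval 0 is left untouched, giving
 * sc == {2, 0, 0}.
 */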

/* the guarantee algorithm for hard aperiodic requests */
static int SLSH_guarantee(SLSH_level_des* lev, HARD_TASK_MODEL* h)
{
        int total_sc = 0;
        int temp, i;
        int dabs = ceil((lev->slot + h->drel)/lev->slot_length); /* absolute deadline of request */
        int dabs_interval = SLSH_getInterval(lev->intervals, dabs, lev->last); /* interval where dabs is */

        /* check if the sc up until the request deadline is >= the request wcet */
        /* 1. the sc of the current interval */
        total_sc = SLSH_sc(lev->intervals, lev->current);

        /* 2. the sc for all whole intervals between current and the interval
            with the request deadline */
        for(i = (lev->current) + 1; i < dabs_interval; ++i)
        {
                if((temp = SLSH_sc(lev->intervals, i)) > 0)
                        total_sc += temp;
        }

        /* 3. the min of the sc or the execution need in the last interval */
        total_sc += min(SLSH_sc(lev->intervals, dabs_interval),
                                dabs - SLSH_intervalStart(lev->intervals,
                                dabs_interval));

        if(total_sc >= h->wcet)
        {       /* update the sc in the intervals from back to front */
                SLSH_updateSc(lev, h);
                return 0;
        }
        else
                return -1;
}
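
/*
 * Worked example (illustrative only): with lev->current == 0, spare
 * capacities sc == {2, 1, 3} for intervals 0..2, and a request whose
 * absolute deadline dabs lies 2 slots into interval 2, the available spare
 * capacity is 2 (current interval) + 1 (whole intervals in between)
 * + min(3, 2) (the part of the last interval before dabs) == 5; whenever
 * this total is at least the requested wcet the request is accepted and
 * SLSH_updateSc() reserves the slots.
 */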

/* check if the task model is accepted and store the necessary parameters */
static int SLSH_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
        SLSH_level_des *lev = (SLSH_level_des *)(level_table[l]);
        STATIC_TASK_MODEL* s;
        HARD_TASK_MODEL* h;
        SOFT_TASK_MODEL* u;


        /* Check the models */
        switch(m->pclass)
        {
        case STATIC_PCLASS:             /* offline scheduled tasks */
          break;
        case HARD_PCLASS:               /* hard aperiodic tasks */
          h = (HARD_TASK_MODEL *) m;
          if (h->drel == 0 || h->wcet == 0)     /* must be set */
            return -1;
          break;
        case SOFT_PCLASS:               /* soft aperiodic tasks */
          u = (SOFT_TASK_MODEL *) m;
          if(u->wcet == 0)              /* must be set */
            return -1;
          break;
        default:
          return -1;
        }


        /* if the SLSH_task_create is called, then the pclass must be a
        valid pclass. Slot-shifting accepts STATIC_TASK, HARD_TASK
        and SOFT_TASK models with some restrictions */

        /* est, dl and wcet are saved in slot lengths */
        switch(m->pclass)
        {
                case STATIC_PCLASS:     /* offline scheduled tasks */
                        s = (STATIC_TASK_MODEL *) m;
                        lev->tasks[p].est = ceil(s->est/lev->slot_length);
                        lev->tasks[p].dabs = ceil(s->dabs/lev->slot_length);
                        lev->tasks[p].interval = s->interval;
                        proc_table[p].avail_time = s->wcet;
                        proc_table[p].wcet = s->wcet;
                        break;
                case HARD_PCLASS:       /* hard aperiodic tasks */
                        h = (HARD_TASK_MODEL *) m;
                        if(SLSH_guarantee(lev, h) == 0)
                        {
                                /* convert drel to dabs */
                                lev->tasks[p].dabs = ceil((lev->slot + h->drel)/lev->slot_length);
                                proc_table[p].avail_time = h->wcet;
                                proc_table[p].wcet = h->wcet;
                        }
                        else /* task not guaranteed */
                                return -1;
                        break;
                case SOFT_PCLASS:
                        u = (SOFT_TASK_MODEL *) m;
                        proc_table[p].avail_time = u->wcet;
                        proc_table[p].wcet = u->wcet;
                        iq_insertlast(p, &lev->unspecified);    /* respect FIFO order */
                        break;
                default:        /* a task model not supported */
                        return -1;
        }
        /* enable wcet check in the kernel */
        proc_table[p].control |= CONTROL_CAP;

        return 0;
}

/************* The slot end event handler *************/
static void SLSH_slot_end(void* p)
{
        SLSH_level_des* lev = (SLSH_level_des *) p;
        PID pid;
        int i;

        /* increase slot "time" by 1 */
        if(lev->slot < lev->LCM)
        {
                lev->slot++;
                /* check if new statics are ready */
                for(i = 0; lev->tasks[i].interval != -1; ++i)
                {
                        if(lev->tasks[i].est <= lev->slot && proc_table[i].status == SLSH_WAIT)
                                proc_table[i].status = SLSH_READY;
                }

                /* check if current (interval) needs updating */
                if(lev->current < SLSH_getInterval(lev->intervals, lev->slot, lev->last))
                        lev->current++;

        }
        else /* restart from the beginning of the offline schedule */
        {
                lev->slot = 0;

                while((pid = iq_getfirst(&lev->idle_statics)) != NIL)
                {
                        if(lev->tasks[pid].est <= lev->slot)
                                proc_table[pid].status = SLSH_READY;
                        else
                                proc_table[pid].status = SLSH_WAIT;
                }
        }

        /* call for a rescheduling and reset the event flag */
        lev->slot_event = -1;
        kern_printf("*");
        event_need_reschedule();
}
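
/*
 * Note (illustrative): with lev->LCM == 20 the slot counter advances
 * 0, 1, ..., 20; the handler invocation that finds lev->slot already at
 * LCM resets the counter to 0 and moves the finished static tasks from
 * idle_statics back to SLSH_READY / SLSH_WAIT for the next repetition of
 * the offline schedule.
 */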

/* when a task becomes executing (EXE status) */
static void SLSH_public_dispatch(LEVEL l, PID pid, int nostop)
{
        SLSH_level_des *lev = (SLSH_level_des *)(level_table[l]);
        struct timespec t;

        /* the task state is set EXE by the scheduler()
        we extract the task from the unspecified queue.
        NB: we can't assume that p is the first task in the queue!!! */

        if(proc_table[pid].pclass == SOFT_PCLASS)
                iq_extract(pid, &lev->unspecified);

        /* also start the timer for one slot length */
        lev->slot_event = kern_event_post(&TIME2TIMESPEC(lev->slot_length, t),
                                          SLSH_slot_end, (void*) lev);
}

/* called when task is moved from EXE status */
static void SLSH_public_epilogue(LEVEL l, PID pid)
{
        SLSH_level_des *lev = (SLSH_level_des *)(level_table[l]);

        /* check if the wcet is finished... */
        if (proc_table[pid].avail_time <= 0)
        {
                /* if it is, raise a XWCET_VIOLATION exception */
                kern_raise(XWCET_VIOLATION, pid);
                proc_table[pid].status = SLSH_WCET_VIOLATED;
        }
        else /* the end of a slot. the task returns into the ready queue... */
        {
                if(proc_table[pid].pclass == SOFT_PCLASS)
                        iq_insertfirst(pid,&lev->unspecified);

                proc_table[pid].status = SLSH_READY;
        }
}
576
 
577
/* when task go from SLEEP to SLSH_READY or SLSH_WAIT */
1123 pj 578
static void SLSH_public_activate(LEVEL l, PID pid)
1085 pj 579
{
580
        SLSH_level_des *lev = (SLSH_level_des *)(level_table[l]);
581
        WORD type = proc_table[pid].pclass;
582
 
583
        /* Test if we are trying to activate a non sleeping task    */
584
        /* Ignore this; the task is already active                  */
585
        if (proc_table[pid].status != SLEEP && proc_table[pid].status != SLSH_WCET_VIOLATED)
586
                return;
587
 
588
        /* make task ready or waiting, dependong on slot (the time) for static tasks only*/
589
        if(type == STATIC_PCLASS && lev->tasks[pid].est <= lev->slot)
590
                proc_table[pid].status = SLSH_READY;
591
        else
592
                proc_table[pid].status = SLSH_WAIT;
593
 
594
        if(type == HARD_PCLASS)
595
                proc_table[pid].status = SLSH_READY;
596
 
597
        /* insert unspecified tasks in QQUEUE and make it ready */
598
        if(type == SOFT_PCLASS)
599
        {              
1107 pj 600
                iq_insertlast(pid ,&lev->unspecified);
1085 pj 601
                proc_table[pid].status = SLSH_READY;
602
        }
603
}
604
 
605
/* when a task i returned to module from a semaphore, mutex ... */
1123 pj 606
static void SLSH_public_unblock(LEVEL l, PID pid)
1085 pj 607
{
608
        SLSH_level_des *lev = (SLSH_level_des *)(level_table[l]);
609
 
610
        /* change staus of task */
611
        proc_table[pid].status = SLSH_READY;
612
 
613
        if(proc_table[pid].pclass == SOFT_PCLASS)
1107 pj 614
                iq_insertfirst(pid ,&lev->unspecified);
1085 pj 615
}
616
 
617
/* when a semaphore, mutex ... taskes a task from module */
1123 pj 618
static void SLSH_public_block(LEVEL l, PID pid)
1085 pj 619
{
620
        /* Extract the running task from the level
621
        . we have already extract it from the ready queue at the dispatch time.
622
        . the capacity event have to be removed by the generic kernel
623
        . the wcet don't need modification...
624
        . the state of the task is set by the calling function
625
        . the deadline must remain...
626
 
627
        So, we do nothing!!!
628
        */
629
}

/* the task has finished its wcet, kill the task (don't kill static tasks)  */
static void SLSH_public_end(LEVEL l, PID pid)
{
        SLSH_level_des *lev = (SLSH_level_des *)(level_table[l]);

        if(proc_table[pid].pclass == SOFT_PCLASS)
        {
                if (proc_table[pid].status == SLSH_READY)
                        iq_extract(pid, &lev->unspecified);
        }
        else if(proc_table[pid].pclass == HARD_PCLASS)
        {
                if (proc_table[pid].status == SLSH_READY)
                        lev->tasks[pid].dabs = 0;

        }
        /* static tasks: put them in the idle QUEUE, reset status and avail_time */
        else if(proc_table[pid].pclass == STATIC_PCLASS)
        {
                proc_table[pid].avail_time = proc_table[pid].wcet;
                proc_table[pid].status = SLSH_IDLE;
                iq_priority_insert(pid, &lev->idle_statics);
        }

        proc_table[pid].status = FREE;
}

/* called when a task should sleep but not execute for a while, maybe a mode change */
//static void SLSH_task_sleep(LEVEL l, PID pid)
//{
//
//      /* the task has terminated its job before consuming the wcet. All OK! */
//      proc_table[pid].status = SLEEP;
//
//      /* we reset the capacity counters... only for static tasks */
//      if (proc_table[pid].pclass == STATIC_PCLASS)
//              proc_table[pid].avail_time = proc_table[pid].wcet;
//
//}


/** Guest Functions: slot shifting accepts no guests, so all of them generate exceptions **/

/******* Registration functions *******/

/*+ Registration function: */
LEVEL SLSH_register_level()
{
        LEVEL l;            /* the level that we register */
        SLSH_level_des *lev;  /* for readability only */
        PID i;              /* a counter */

        kern_printf("SLSH_register_level\n");

        /* request an entry in the level_table */
        l = level_alloc_descriptor(sizeof(SLSH_level_des));

        lev = (SLSH_level_des *)level_table[l];

        printk("    lev=%d\n",(int)lev);

        /* fill the standard descriptor */
        lev->l.public_scheduler = SLSH_public_scheduler;
        lev->l.public_guarantee = SLSH_public_guarantee;
        lev->l.public_create    = SLSH_public_create;
        lev->l.public_end       = SLSH_public_end;
        lev->l.public_dispatch  = SLSH_public_dispatch;
        lev->l.public_epilogue  = SLSH_public_epilogue;
        lev->l.public_activate  = SLSH_public_activate;
        lev->l.public_unblock   = SLSH_public_unblock;
        lev->l.public_block     = SLSH_public_block;

        /* fill the SLSH descriptor part */
        for(i = 0; i < MAX_PROC; i++)
        {
                lev->tasks[i].est = -1;
                lev->tasks[i].dabs = 0;
                lev->tasks[i].interval = -1;
        }

        for(i = 0; i < MAX_INTERVALS; i++)
        {
                lev->intervals[i].start = -1;
                lev->intervals[i].end = -1;
                lev->intervals[i].length = 0;
                lev->intervals[i].maxt = 0;
                lev->intervals[i].sc = 0;
        }

        lev->current = 0;
        lev->last = NIL;
        lev->slot = 0;
        lev->slot_length = 0;
        lev->slot_event = -1;

        return l;
}


/* add an offline interval to the level; intervals are appended in call order */
void SLSH_set_interval(LEVEL l, int start, int end, int maxt)
{
        SLSH_level_des* lev = (SLSH_level_des *)(level_table[l]);
        static int i = -1;

        i++;
        lev->intervals[i].start = start;
        lev->intervals[i].end = end;
        lev->intervals[i].length = end - start;
        lev->intervals[i].maxt = maxt;
        lev->intervals[i].sc = lev->intervals[i].length - maxt;

        lev->last = i;
}

/* set the slot length (in real system time) used by the level */
void SLSH_set_variables(LEVEL l, TIME length)
{
        SLSH_level_des* lev = (SLSH_level_des *)(level_table[l]);

        lev->slot_length = length;
}
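
/*
 * Usage sketch (illustrative only; not part of the original module, and the
 * initialization-file conventions of S.Ha.R.K. are omitted).  A hypothetical
 * init helper could register the level and describe an offline schedule of
 * two intervals with a slot length of 1000 time units:
 *
 *   LEVEL slsh_level;
 *
 *   void my_slsh_init(void)                        // hypothetical helper
 *   {
 *       slsh_level = SLSH_register_level();
 *       SLSH_set_variables(slsh_level, 1000);      // one slot = 1000 TIME units
 *       SLSH_set_interval(slsh_level, 0, 10, 4);   // [0,10):  maxt 4, sc 6
 *       SLSH_set_interval(slsh_level, 10, 25, 9);  // [10,25): maxt 9, sc 6
 *   }
 *
 * Static tasks would then be created with a STATIC_TASK_MODEL whose
 * interval field refers to one of the intervals declared above.
 */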