/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors     :
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
 *   Luca Abeni          <luca@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: slsh.c,v 1.1.1.1 2002-09-02 09:37:41 pj Exp $

 File:        $File$
 Revision:    $Revision: 1.1.1.1 $
 Last update: $Date: 2002-09-02 09:37:41 $
 ------------

 This file contains the scheduling module for Slot-Shifting.

 Read slsh.h for further details.

**/

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */


#include "slsh.h"
#include <ll/stdio.h>
#include <ll/stdlib.h>
#include <ll/string.h>
#include <ll/math.h>    /* for ceil(...) */
#include <ll/ll.h>      /* for memcpy(...) */
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>

//#define eslsh_printf kern_printf
#define slsh_printf printk

/* Keeps information about static and guaranteed tasks */
typedef struct {
        int est;
        int dabs;
        int interval;
} SLSH_task;

/*+ Status used in the level +*/
#define SLSH_READY                      MODULE_STATUS_BASE
#define SLSH_WAIT                       MODULE_STATUS_BASE + 1
#define SLSH_IDLE                       MODULE_STATUS_BASE + 2
#define SLSH_WCET_VIOLATED      MODULE_STATUS_BASE + 3

/*+ defines +*/
#define MAX_INTERVALS 1000              /* 1000 intervals is max, for now */

/******+ the level redefinition for the SLOT SHIFTING level +*******/
typedef struct {
        level_des l;                    /*+ the standard level descriptor +*/

        /* task lists */
        SLSH_task tasks[MAX_PROC];  /* est and dl's for static and guaranteed tasks */

        QUEUE idle_statics; /* finished static tasks */

        QQUEUE unspecified;     /* tasks with only a wcet */

        /* the Intervals list */
        SLSH_interval intervals[MAX_INTERVALS];
        int current;                    /* current interval */
        int last;                       /* last interval */

        int slot;                       /* slot shifting time */
        TIME slot_length;       /* slot length in real system time */
        int LCM;                        /* length (in slots) of the offline schedule */

        int slot_event;         /* save the event */
} SLSH_level_des;

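/*
 * Illustrative example (values are made up): how the interval table is
 * meant to be read.  Suppose the offline schedule is 10 slots long and
 * SLSH_set_interval() was called twice:
 *
 *   intervals[0]: start=0, end=6,  length=6, maxt=4  ->  sc = 6-4 = 2
 *   intervals[1]: start=6, end=10, length=4, maxt=3  ->  sc = 4-3 = 1
 *
 * sc (spare capacity) counts the slots of an interval that are not
 * reserved for its offline (static) tasks; the scheduler spends it on
 * aperiodic work and "borrows" it across intervals via SLSH_swapSc().
 */
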
/* Which task models the Slot-Shifting module accepts */
static int SLSH_level_accept_task_model(LEVEL l, TASK_MODEL* m)
{
        HARD_TASK_MODEL* h;
        SOFT_TASK_MODEL* s;

        /* Check the models */
        switch(m->pclass)
        {
                case STATIC_PCLASS:             /* offline scheduled tasks */
                        return 0;
                case HARD_PCLASS:               /* hard aperiodic tasks */
                        h = (HARD_TASK_MODEL *) m;
                        if(h->drel != 0 && h->wcet != 0)        /* must be set */
                                return 0;
                        break;
                case SOFT_PCLASS:               /* soft aperiodic tasks */
                        s = (SOFT_TASK_MODEL *) m;
                        if(s->wcet != 0)                /* must be set */
                                return 0;
                        break;
                default:
                        break;
        }

        return -1;      /* Not accepted model */
}


static void SLSH_level_status(LEVEL l)
{
        kern_printf("Level status not implemented\n");
}

/* check if some tasks are ready, return 0 if ready, -1 otherwise */
static int SLSH_R(SLSH_task* tasks)
{
        int s;

        /* for all static tasks */
        for(s = 0; tasks[s].est != -1; ++s)
        {
           if(proc_table[s].status == SLSH_READY)
               return 0;
        }
        return -1;
}

/* check if unspecified exists, return 0 if it exists, -1 otherwise */
static int SLSH_T(QQUEUE unspecified)
{
        if(unspecified.first != NIL)
                return 0;
        else
                return -1;
}

/* return the sc in an interval */
static int SLSH_sc(SLSH_interval* intervals, int i)
{
        return intervals[i].sc;
}

/* return a static task from the current interval or a guaranteed task */
static PID SLSH_staticOrGuaranteed(SLSH_level_des* lev)
{
        int lowest_dl = 0;      /* lowest dl found */
        PID pid = NIL;          /* static or guaranteed task */
        int t;

        /* Decide according to EDF, go through all static & guaranteed tasks */
        for(t = 0; t < MAX_PROC; ++t)
        {
                /* static tasks */
                if(proc_table[t].pclass == STATIC_PCLASS)
                {
                        /* static task must belong to current interval */
                        if(lev->tasks[t].interval == lev->current)
                        {
                                /* only ready tasks */
                                if(proc_table[t].status == SLSH_READY)
                                {
                                        /* first candidate, or a new lower dl was found */
                                        if(pid == NIL || lev->tasks[t].dabs < lowest_dl)
                                        {
                                                lowest_dl = lev->tasks[t].dabs;
                                                pid = t;
                                        }
                                }
                        }
                } /* guaranteed tasks */
                else if(proc_table[t].pclass == HARD_PCLASS)
                {
                        /* only ready tasks */
                        if(proc_table[t].status == SLSH_READY)
                        {
                                /* first candidate, or a new lower dl was found */
                                if(pid == NIL || lev->tasks[t].dabs < lowest_dl)
                                {
                                        lowest_dl = lev->tasks[t].dabs;
                                        pid = t;
                                }
                        }
                }
        }/* for all tasks */

        return pid;
}

/* return a static task among the candidates, all ready statics */
static PID SLSH_candidates(SLSH_task* tasks)
{
        int lowest_dl = 0;
        PID pid = NIL;
        int t;

        /* Use the EDF algorithm again to decide which task to run */
        for(t = 0; t < MAX_PROC; ++t)
        {
                /* only static tasks */
                if(proc_table[t].pclass == STATIC_PCLASS)
                {
                        /* only ready tasks */
                        if(proc_table[t].status == SLSH_READY)
                        {
                                /* first candidate, or a new lower dl was found */
                                if(pid == NIL || tasks[t].dabs < lowest_dl)
                                {
                                        lowest_dl = tasks[t].dabs;
                                        pid = t;
                                }
                        }/* all ready tasks */
                }/* all static tasks */
        }/* for all tasks */

        return pid;
}

/* decrease the sc in an interval by amount */
void SLSH_decSc(SLSH_interval* intervals, int i, int amount)
{
        intervals[i].sc -= amount;
}

/* increase the sc in an interval by amount */
void SLSH_incSc(SLSH_interval* intervals, int i, int amount)
{
        intervals[i].sc += amount;
}

/* swap the sc between intervals, also consider intervals with negative sc */
void SLSH_swapSc(SLSH_interval* intervals, int current, int task_interval)
{
        /* decrease the sc in the current interval */
        SLSH_decSc(intervals, current, 1);

        /* update the other interval(s) */
        if(intervals[task_interval].sc < 0)     /* negative sc */
        {
                /* special case, increase next interval sc by 1 and also current interval (borrowing) */
                if(task_interval == current + 1)
                {
                        SLSH_incSc(intervals, task_interval, 1);
                        SLSH_incSc(intervals, current, 1);
                }
                else /* increase every interval sc that is negative between current and task_interval */
                {
                        while(task_interval > current && intervals[task_interval].sc < 0)
                        {
                                SLSH_incSc(intervals, task_interval, 1);
                                task_interval--;
                        }
                }
        }
        else /* ordinary swapping */
                SLSH_incSc(intervals, task_interval, 1);
}

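/*
 * Worked example for SLSH_swapSc() (values are made up): assume the task
 * picked by the scheduler belongs to interval 2 while the current interval
 * is 0, and the spare capacities are sc[0]=2, sc[1]=1, sc[2]=1.
 * SLSH_swapSc(intervals, 0, 2) takes the "ordinary swapping" branch:
 * sc[0] becomes 1 (the current interval spends one slot on the borrowed
 * task) and sc[2] becomes 2 (interval 2 will need one slot less for its
 * own task).  The negative-sc branches only repair intervals that have
 * already lent out more capacity than they own.
 */
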
/* The scheduler, decides which task to run. */
static PID SLSH_level_scheduler(LEVEL l)
{
        SLSH_level_des* lev = (SLSH_level_des *)(level_table[l]);
        PID pid;

        /* The scheduler chooses among static, guaranteed (hard aperiodic) and
             unspecified (soft aperiodic) tasks */
        /* no ready tasks and no sc, execute idle task */
        if(SLSH_R(lev->tasks) != 0 && SLSH_sc(lev->intervals, lev->current) == 0)
                return NIL;
        /* must execute a static from the current interval or a guaranteed task */
        else if(SLSH_R(lev->tasks) == 0 && SLSH_sc(lev->intervals, lev->current) == 0)
                return SLSH_staticOrGuaranteed(lev);
        /* sc available... */
        else if(SLSH_R(lev->tasks) == 0 && SLSH_sc(lev->intervals, lev->current) > 0)
        {
                /* If an unspecified task exists, execute it according to FIFO order */
                if(SLSH_T(lev->unspecified) == 0)
                {
                        SLSH_decSc(lev->intervals, lev->current, 1);    /* decrease sc by 1 */
                        return (PID)qq_getfirst(&lev->unspecified);
                }
                else /* No unspecified, execute task from candidates (statics) */
                {
                        pid = SLSH_candidates(lev->tasks);

                        /* sc needs to be swapped */
                        if(lev->tasks[pid].interval != lev->current)
                                SLSH_swapSc(lev->intervals, lev->current, lev->tasks[pid].interval);

                        return pid;
                }
        }

        kern_printf("(SLSH s)");
        return NIL;
}

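/*
 * Decision summary for the scheduler above (illustrative scenario, not a
 * real trace): with sc = 1 in the current interval, one ready static task
 * S belonging to the current interval and one queued soft task U, the
 * "sc available" branch runs U first (FIFO) and sc drops to 0; on the next
 * invocation sc == 0, so SLSH_staticOrGuaranteed() picks S (EDF among the
 * ready static tasks of the current interval and the guaranteed hard
 * tasks).  Only when no static task is ready and no sc is left does the
 * level return NIL.
 */
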
/* not used, slot-shifting handles all guarantees itself, it handles all bandwidth */
static int SLSH_level_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
        *freebandwidth = 0;
        return 1;
}

/* get the interval that x is in */
static int SLSH_getInterval(SLSH_interval* intervals, int x, int last)
{
        int i;

        /* search through the intervals */
        for(i = 0; i <= last; ++i)
        {
                /* x is in the interval whose start is <= x and whose end is > x */
                if(intervals[i].start <= x && x < intervals[i].end)
                        return i;
        }
        return -1;
}

/* get the start of the interval I */
static int SLSH_intervalStart(SLSH_interval* intervals, int I)
{
        return intervals[I].start;
}

/* split interval I into two parts; slow because of copying.  Note: there is
    no check that there is enough space in the intervals array */
static void SLSH_splitInterval(SLSH_level_des* lev, int I, int dabs)
{
        SLSH_interval left_interval;
        int i;


        lev->last++;

        /* move every interval above and including I */
        for(i = lev->last; i > I; --i)
                memcpy(&lev->intervals[i], &lev->intervals[i - 1], sizeof(SLSH_interval));

        /* Left interval start, end and length */
        left_interval.start = lev->intervals[I].start;
        left_interval.end = dabs;
        left_interval.length = left_interval.end - left_interval.start;

        /* Right interval (reuses the old interval struct): new start and length,
           the end keeps its old value */
        lev->intervals[I + 1].start = dabs;
        lev->intervals[I + 1].length = lev->intervals[I + 1].end - lev->intervals[I + 1].start;

        /* check if sc still exists in the right interval */
        if(lev->intervals[I + 1].length - lev->intervals[I + 1].maxt > 0)
        {
                lev->intervals[I + 1].sc = lev->intervals[I + 1].length - lev->intervals[I + 1].maxt;
                left_interval.sc = left_interval.length; /* the whole interval is free, for now... */
        }
        else /* no sc in the right interval */
        {
                lev->intervals[I + 1].maxt = lev->intervals[I + 1].length;
                left_interval.sc = lev->intervals[I + 1].sc; /* all sc in left interval */
                lev->intervals[I + 1].sc = 0;
        }

        /* keep the invariant sc = length - maxt in the left interval too */
        left_interval.maxt = left_interval.length - left_interval.sc;

        /* insert the new interval */
        memcpy(&lev->intervals[I], &left_interval, sizeof(SLSH_interval));
}

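/*
 * Worked example for SLSH_splitInterval() (made-up numbers): a guaranteed
 * deadline at slot 8 falls inside interval I = {start=6, end=10, length=4,
 * maxt=3, sc=1}.  SLSH_splitInterval(lev, I, 8) shifts the table one entry
 * to the right and leaves
 *
 *   intervals[I]   (left) : start=6, end=8,  length=2, maxt=1, sc=1
 *   intervals[I+1] (right): start=8, end=10, length=2, maxt=2, sc=0
 *
 * i.e. all of the old spare capacity ends up before the new deadline and
 * the invariant sc = length - maxt still holds in both halves.
 */
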
/* Reduce the sc from back to front by the wcet amount, interval splitting may be necessary */
static void SLSH_updateSc(SLSH_level_des* lev, HARD_TASK_MODEL* h)
{
        int dabs = ceil((double)(lev->slot + h->drel) / (double)lev->slot_length); /* absolute deadline of request */
        int dabs_interval = SLSH_getInterval(lev->intervals, dabs, lev->last); /* interval where dabs is */
        int C = ceil((double)h->wcet / (double)lev->slot_length); /* amount of sc to reduce */
        int sc = 0;
        int i;

        /* check if interval splitting is necessary */
        if(lev->intervals[dabs_interval].end != dabs)
                SLSH_splitInterval(lev, dabs_interval, dabs);

        /* decrease sc in all intervals that are necessary, from dabs_interval to current */
        for(i = dabs_interval; i >= lev->current && C > 0; --i)
        {
                if((sc = SLSH_sc(lev->intervals, i)) >= 0) /* only decrease where sc exists */
                {
                        if(sc > C) /* the last sc dec */
                        {
                                SLSH_decSc(lev->intervals, i, C);
                                C = 0;
                        }
                        else    /* too little sc in this interval, decrease it to 0 */
                        {
                                C -= SLSH_sc(lev->intervals, i);
                                SLSH_decSc(lev->intervals, i, SLSH_sc(lev->intervals, i));
                        }
                }
        }/* for all intervals */
}

/* the guarantee algorithm for hard aperiodic requests */
static int SLSH_guarantee(SLSH_level_des* lev, HARD_TASK_MODEL* h)
{
        int total_sc = 0;
        int temp, i;
        int dabs = ceil((double)(lev->slot + h->drel) / (double)lev->slot_length); /* absolute deadline of request */
        int dabs_interval = SLSH_getInterval(lev->intervals, dabs, lev->last); /* interval where dabs is */
        int C = ceil((double)h->wcet / (double)lev->slot_length); /* request wcet in slots */

        /* check if the sc up until the request deadline is >= the request wcet */
        /* 1. the sc of the current interval */
        total_sc = SLSH_sc(lev->intervals, lev->current);

        /* 2. the sc for all whole intervals between current and the interval
            with the request deadline */
        for(i = (lev->current) + 1; i < dabs_interval; ++i)
        {
                if((temp = SLSH_sc(lev->intervals, i)) > 0)
                        total_sc += temp;
        }

        /* 3. the min of sc or the execution need in the last interval */
        total_sc += min(SLSH_sc(lev->intervals, dabs_interval),
                                dabs - SLSH_intervalStart(lev->intervals,
                                dabs_interval));

        if(total_sc >= C)
        {       /* update the sc in the intervals from back to front */
                SLSH_updateSc(lev, h);
                return 0;
        }
        else
                return -1;
}

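/*
 * Worked example of the acceptance test above (illustrative values only).
 * Assume slot_length = 1000 us, current slot = 0, current interval = 0 and
 *
 *   sc[0] = 2,  sc[1] = 1,  sc[2] = 3,  interval 2 = [8, 12)
 *
 * A request with drel = 10000 us and wcet = 4000 us gives dabs = 10 (slots)
 * and dabs_interval = 2.  The available capacity is
 *
 *   total_sc = sc[0] + sc[1] + min(sc[2], dabs - start(2))
 *            = 2     + 1     + min(3, 10 - 8)             = 5
 *
 * Since 5 >= ceil(4000/1000) = 4 slots, the request is accepted and
 * SLSH_updateSc() splits interval 2 at slot 10 and removes the 4 slots
 * from the intervals, last one first.
 */
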
/* check if the task model is accepted and store the necessary parameters */
static int SLSH_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
        SLSH_level_des *lev = (SLSH_level_des *)(level_table[l]);
        STATIC_TASK_MODEL* s;
        HARD_TASK_MODEL* h;
        SOFT_TASK_MODEL* u;

        /* if SLSH_task_create is called, then the pclass must be a
        valid pclass. Slot-shifting accepts STATIC_TASK, HARD_TASK
        and SOFT_TASK models with some restrictions */

        /* est and dabs are stored in slots; the wcet is kept in the
           proc_table in time units */
        switch(m->pclass)
        {
                case STATIC_PCLASS:     /* offline scheduled tasks */
                        s = (STATIC_TASK_MODEL *) m;
                        lev->tasks[p].est = ceil((double)s->est / (double)lev->slot_length);
                        lev->tasks[p].dabs = ceil((double)s->dabs / (double)lev->slot_length);
                        lev->tasks[p].interval = s->interval;
                        proc_table[p].avail_time = s->wcet;
                        proc_table[p].wcet = s->wcet;
                        break;
                case HARD_PCLASS:       /* hard aperiodic tasks */
                        h = (HARD_TASK_MODEL *) m;
                        if(SLSH_guarantee(lev, h) == 0)
                        {
                                /* convert drel to dabs */
                                lev->tasks[p].dabs = ceil((double)(lev->slot + h->drel) / (double)lev->slot_length);
                                proc_table[p].avail_time = h->wcet;
                                proc_table[p].wcet = h->wcet;
                        }
                        else /* task not guaranteed */
                                return -1;
                        break;
                case SOFT_PCLASS:
                        u = (SOFT_TASK_MODEL *) m;
                        proc_table[p].avail_time = u->wcet;
                        proc_table[p].wcet = u->wcet;
                        qq_insertlast(p, &lev->unspecified);    /* respect FIFO order */
                        break;
                default:        /* a task model not supported */
                        return -1;
        }
        /* enable wcet check in the kernel */
        proc_table[p].control |= CONTROL_CAP;

        return 0;
}

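/*
 * Example of the conversion done above (illustrative values): with
 * slot_length = 1000 us, a STATIC_PCLASS task created with est = 2000 us,
 * dabs = 6000 us, wcet = 1500 us and interval = 1 is stored as
 *
 *   lev->tasks[p].est      = ceil(2000/1000) = 2   (slots)
 *   lev->tasks[p].dabs     = ceil(6000/1000) = 6   (slots)
 *   lev->tasks[p].interval = 1
 *
 * while proc_table[p].wcet and avail_time keep the raw 1500 us, which is
 * what the kernel's wcet check (CONTROL_CAP) accounts against.
 */
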
static void SLSH_task_detach(LEVEL l, PID p)
{
        /* do nothing */
}

/* check if a task chosen by the scheduler is correct */
static int SLSH_task_eligible(LEVEL l, PID p)
{
        return 0; /* if the task p is chosen, it is always eligible */
}

/************* The slot end event handler *************/
static void SLSH_slot_end(void* p)
{
        SLSH_level_des* lev = (SLSH_level_des *) p;
        PID pid;
        int i;

        /* increase slot "time" by 1 */
        if(lev->slot < lev->LCM)
        {
                lev->slot++;
                /* check if new statics are ready */
                for(i = 0; lev->tasks[i].interval != -1; ++i)
                {
                        if(lev->tasks[i].est <= lev->slot && proc_table[i].status == SLSH_WAIT)
                                proc_table[i].status = SLSH_READY;
                }

                /* check if current (interval) needs updating */
                if(lev->current < SLSH_getInterval(lev->intervals, lev->slot, lev->last))
                        lev->current++;

        }
        else /* restart from the beginning of the offline schedule */
        {
                lev->slot = 0;

                while((pid = q_getfirst(&lev->idle_statics)) != NIL)
                {
                        if(lev->tasks[pid].est <= lev->slot)
                                proc_table[pid].status = SLSH_READY;
                        else
                                proc_table[pid].status = SLSH_WAIT;
                }
        }

        /* reset the event flag and call for a rescheduling */
        lev->slot_event = -1;
        kern_printf("*");
        event_need_reschedule();
}

/* when a task becomes executing (EXE status) */
static void SLSH_task_dispatch(LEVEL l, PID pid, int nostop)
{
        SLSH_level_des *lev = (SLSH_level_des *)(level_table[l]);
        struct timespec t;

        /* the task state is set to EXE by the scheduler();
        we extract the task from the unspecified queue.
        NB: we can't assume that pid is the first task in the queue!!! */

        if(proc_table[pid].pclass == SOFT_PCLASS)
                qq_extract(pid, &lev->unspecified);

        /* also start the timer for one slot length */
        lev->slot_event = kern_event_post(&TIME2TIMESPEC(lev->slot_length, t),
                                          SLSH_slot_end, (void*) lev);
}

/* called when a task is moved away from the EXE status */
static void SLSH_task_epilogue(LEVEL l, PID pid)
{
        SLSH_level_des *lev = (SLSH_level_des *)(level_table[l]);

        /* check if the wcet is finished... */
        if (proc_table[pid].avail_time <= 0)
        {
                /* if it is, raise a XWCET_VIOLATION exception */
                kern_raise(XWCET_VIOLATION, pid);
                proc_table[pid].status = SLSH_WCET_VIOLATED;
        }
        else /* the end of a slot. the task returns into the ready queue... */
        {
                if(proc_table[pid].pclass == SOFT_PCLASS)
                        qq_insertfirst(pid,&lev->unspecified);

                proc_table[pid].status = SLSH_READY;
        }
}

/* when a task goes from SLEEP to SLSH_READY or SLSH_WAIT */
static void SLSH_task_activate(LEVEL l, PID pid)
{
        SLSH_level_des *lev = (SLSH_level_des *)(level_table[l]);
        WORD type = proc_table[pid].pclass;

        /* Test if we are trying to activate a non-sleeping task    */
        /* Ignore this; the task is already active                  */
        if (proc_table[pid].status != SLEEP && proc_table[pid].status != SLSH_WCET_VIOLATED)
                return;

        /* make the task ready or waiting, depending on the slot (the time); for static tasks only */
        if(type == STATIC_PCLASS && lev->tasks[pid].est <= lev->slot)
                proc_table[pid].status = SLSH_READY;
        else
                proc_table[pid].status = SLSH_WAIT;

        if(type == HARD_PCLASS)
                proc_table[pid].status = SLSH_READY;

        /* insert unspecified tasks in the QQUEUE and make them ready */
        if(type == SOFT_PCLASS)
        {
                qq_insertlast(pid ,&lev->unspecified);
                proc_table[pid].status = SLSH_READY;
        }
}

/* when a task is returned to the module from a semaphore, mutex, ... */
static void SLSH_task_insert(LEVEL l, PID pid)
{
        SLSH_level_des *lev = (SLSH_level_des *)(level_table[l]);

        /* change the status of the task */
        proc_table[pid].status = SLSH_READY;

        if(proc_table[pid].pclass == SOFT_PCLASS)
                qq_insertfirst(pid ,&lev->unspecified);
}

/* when a semaphore, mutex, ... takes a task from the module */
static void SLSH_task_extract(LEVEL l, PID pid)
{
        /* Extract the running task from the level
        . we have already extracted it from the ready queue at dispatch time.
        . the capacity event has to be removed by the generic kernel
        . the wcet doesn't need modification...
        . the state of the task is set by the calling function
        . the deadline must remain...

        So, we do nothing!!!
        */
}

/* the task has finished execution for this period */
static void SLSH_task_endcycle(LEVEL l, PID pid)
{
        /* do nothing */
}

/* the task has finished, kill the task (don't kill static tasks) */
static void SLSH_task_end(LEVEL l, PID pid)
{
        SLSH_level_des *lev = (SLSH_level_des *)(level_table[l]);

        if(proc_table[pid].pclass == SOFT_PCLASS)
        {
                if (proc_table[pid].status == SLSH_READY)
                        qq_extract(pid, &lev->unspecified);
        }
        else if(proc_table[pid].pclass == HARD_PCLASS)
        {
                if (proc_table[pid].status == SLSH_READY)
                        lev->tasks[pid].dabs = 0;

        }
        /* static tasks: put them in the idle QUEUE, reset status and avail_time */
        else if(proc_table[pid].pclass == STATIC_PCLASS)
        {
                proc_table[pid].avail_time = proc_table[pid].wcet;
                proc_table[pid].status = SLSH_IDLE;
                q_insert(pid, &lev->idle_statics);
        }

        /* don't kill static tasks: they are re-activated by the slot end handler */
        if(proc_table[pid].pclass != STATIC_PCLASS)
                proc_table[pid].status = FREE;
}

/* called when a task should sleep and not execute for a while, maybe a mode change */
static void SLSH_task_sleep(LEVEL l, PID pid)
{

        /* the task has terminated its job before consuming the wcet. All OK! */
        proc_table[pid].status = SLEEP;

        /* we reset the capacity counters... only for static tasks */
        if (proc_table[pid].pclass == STATIC_PCLASS)
                proc_table[pid].avail_time = proc_table[pid].wcet;

}

static void SLSH_task_delay(LEVEL l, PID p, TIME usdelay) { }

/** Guest Functions: slot shifting accepts no guests, so they all generate exceptions **/

static int SLSH_level_accept_guest_model(LEVEL l, TASK_MODEL* m) { return -1; }

static int SLSH_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0;     }

static void SLSH_guest_detach(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void SLSH_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void SLSH_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void SLSH_guest_activate(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void SLSH_guest_insert(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void SLSH_guest_extract(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void SLSH_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void SLSH_guest_end(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void SLSH_guest_sleep(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

static void SLSH_guest_delay(LEVEL l, PID p, TIME usdelay)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }

/******* Registration functions *******/

/*+ Registration function: */
void SLSH_register_level()
{
        LEVEL l;            /* the level that we register */
        SLSH_level_des *lev;  /* for readability only */
        PID i;              /* a counter */

        kern_printf("SLSH_register_level\n");

        /* request an entry in the level_table */
        l = level_alloc_descriptor();

        /* alloc the space needed for the SLSH_level_des */
        lev = (SLSH_level_des *)kern_alloc(sizeof(SLSH_level_des));

        /* update the level_table with the new entry */
        level_table[l] = (level_des *)lev;

        /* fill the standard descriptor */
        strncpy(lev->l.level_name, SLSH_LEVELNAME, MAX_LEVELNAME);

        lev->l.level_code                       = SLSH_LEVEL_CODE;
        lev->l.level_version                    = SLSH_LEVEL_VERSION;

        lev->l.level_accept_task_model          = SLSH_level_accept_task_model;
        lev->l.level_accept_guest_model         = SLSH_level_accept_guest_model;
        lev->l.level_status                     = SLSH_level_status;
        lev->l.level_scheduler                  = SLSH_level_scheduler;

        lev->l.level_guarantee                  = SLSH_level_guarantee;

        lev->l.task_create                      = SLSH_task_create;
        lev->l.task_detach                      = SLSH_task_detach;
        lev->l.task_eligible                    = SLSH_task_eligible;
        lev->l.task_dispatch                    = SLSH_task_dispatch;
        lev->l.task_epilogue                    = SLSH_task_epilogue;
        lev->l.task_activate                    = SLSH_task_activate;
        lev->l.task_insert                      = SLSH_task_insert;
        lev->l.task_extract                     = SLSH_task_extract;
        lev->l.task_endcycle                    = SLSH_task_endcycle;
        lev->l.task_end                         = SLSH_task_end;
        lev->l.task_sleep                       = SLSH_task_sleep;
        lev->l.task_delay                       = SLSH_task_delay;

        lev->l.guest_create                     = SLSH_guest_create;
        lev->l.guest_detach                     = SLSH_guest_detach;
        lev->l.guest_dispatch                   = SLSH_guest_dispatch;
        lev->l.guest_epilogue                   = SLSH_guest_epilogue;
        lev->l.guest_activate                   = SLSH_guest_activate;
        lev->l.guest_insert                     = SLSH_guest_insert;
        lev->l.guest_extract                    = SLSH_guest_extract;
        lev->l.guest_endcycle                   = SLSH_guest_endcycle;
        lev->l.guest_end                        = SLSH_guest_end;
        lev->l.guest_sleep                      = SLSH_guest_sleep;
        lev->l.guest_delay                      = SLSH_guest_delay;

        /* fill the SLSH descriptor part */
        for(i = 0; i < MAX_PROC; i++)
        {
                lev->tasks[i].est = -1;
                lev->tasks[i].dabs = 0;
                lev->tasks[i].interval = -1;
        }

        for(i = 0; i < MAX_INTERVALS; i++)
        {
                lev->intervals[i].start = -1;
                lev->intervals[i].end = -1;
                lev->intervals[i].length = 0;
                lev->intervals[i].maxt = 0;
                lev->intervals[i].sc = 0;
        }

        lev->current = 0;
        lev->last = NIL;
        lev->slot = 0;
        lev->slot_length = 0;
        lev->LCM = 0;
        lev->slot_event = -1;
}


void SLSH_set_interval(LEVEL l, int start, int end, int maxt)
{
        SLSH_level_des* lev = (SLSH_level_des *)(level_table[l]);
        static int i = -1;

        i++;
        lev->intervals[i].start = start;
        lev->intervals[i].end = end;
        lev->intervals[i].length = end - start;
        lev->intervals[i].maxt = maxt;
        lev->intervals[i].sc = lev->intervals[i].length - maxt;

        lev->last = i;
}

void SLSH_set_variables(LEVEL l, TIME length)
{
        SLSH_level_des* lev = (SLSH_level_des *)(level_table[l]);

        lev->slot_length = length;
}
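
/*
 * Minimal usage sketch (illustrative only; the slot length and the interval
 * values are made up, and the level index is assumed to be the one assigned
 * by level_alloc_descriptor(), here 0).  A system using this module would
 * register the level and load the offline schedule before creating tasks:
 *
 *   LEVEL l = 0;
 *
 *   SLSH_register_level();
 *   SLSH_set_variables(l, 1000);     // one slot = 1000 us
 *   SLSH_set_interval(l, 0, 6, 4);   // interval 0: [0,6),  maxt=4 -> sc=2
 *   SLSH_set_interval(l, 6, 10, 3);  // interval 1: [6,10), maxt=3 -> sc=1
 *
 * Note that SLSH_register_level() does not return the level index, so the
 * caller has to know which entry of level_table it obtained.
 */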