/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors     :
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
 *   Luca Abeni          <luca@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: rm.c,v 1.3 2002-11-11 08:32:06 pj Exp $

 File:        $File$
 Revision:    $Revision: 1.3 $
 Last update: $Date: 2002-11-11 08:32:06 $
 ------------

 This file contains the scheduling module RM (Rate Monotonic).

 Read rm.h for further details.

 This file is identical to EDF.c except for:

 . EDF changed to RM :-)
 . q_timespec_insert changed to q_insert
 . proc_table[p].priority is also modified when we modify lev->period[p]


**/

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */


#include <modules/rm.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <kernel/trace.h>

/*+ Status used in the level +*/
#define RM_READY         MODULE_STATUS_BASE    /*+ - Ready status        +*/
#define RM_WCET_VIOLATED MODULE_STATUS_BASE+2  /*+ when wcet is finished +*/
#define RM_WAIT          MODULE_STATUS_BASE+3  /*+ to wait the deadline  +*/
#define RM_IDLE          MODULE_STATUS_BASE+4  /*+ to wait the deadline  +*/
#define RM_ZOMBIE        MODULE_STATUS_BASE+5  /*+ to wait the free time +*/

/*+ flags +*/
#define RM_FLAG_SPORADIC    1
#define RM_FLAG_NORAISEEXC  2

/*+ the level redefinition for the Rate Monotonic +*/
typedef struct {
  level_des l;     /*+ the standard level descriptor          +*/

  TIME period[MAX_PROC]; /*+ The task periods; the deadlines are
                       stored in the priority field           +*/
  int deadline_timer[MAX_PROC];
                   /*+ The task deadline timers               +*/

  int flag[MAX_PROC];
                   /*+ used to manage the JOB_TASK_MODEL and the
                       periodicity                            +*/

  IQUEUE ready;     /*+ the ready queue                        +*/

  int flags;       /*+ the init flags...                      +*/

  bandwidth_t U;   /*+ the used bandwidth                     +*/

} RM_level_des;
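
/* Illustrative note (not part of the original module): lev->U stores the
   task-set utilization scaled to the integer range [0, MAX_BANDWIDTH];
   each guaranteed task contributes (MAX_BANDWIDTH / period) * wcet,
   i.e. wcet/period in fixed point.  A minimal sketch of the same
   bookkeeping done by RM_task_create/RM_task_detach below, assuming only
   that bandwidth_t is an unsigned integer type whose full-scale value is
   MAX_BANDWIDTH (the function name is hypothetical): */
#if 0
static int example_add_bandwidth(bandwidth_t *U, TIME wcet, TIME period)
{
  bandwidth_t b = (MAX_BANDWIDTH / period) * wcet;

  if (MAX_BANDWIDTH - *U > b) {  /* same overflow check used below */
    *U += b;                     /* task accepted                  */
    return 0;
  }
  return -1;                     /* U would exceed MAX_BANDWIDTH   */
}
#endif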


static char *RM_status_to_a(WORD status)
{
  if (status < MODULE_STATUS_BASE)
    return status_to_a(status);

  switch (status) {
    case RM_READY        : return "RM_Ready";
    case RM_WCET_VIOLATED: return "RM_Wcet_Violated";
    case RM_WAIT         : return "RM_Sporadic_Wait";
    case RM_IDLE         : return "RM_Idle";
    case RM_ZOMBIE       : return "RM_Zombie";
    default              : return "RM_Unknown";
  }
}

static void RM_timer_deadline(void *par)
{
  PID p = (PID) par;
  RM_level_des *lev;
  struct timespec *temp;

  lev = (RM_level_des *)level_table[proc_table[p].task_level];

  switch (proc_table[p].status) {
    case RM_ZOMBIE:
      /* we finally put the task descriptor back into the free queue */
      proc_table[p].status = FREE;
      iq_insertfirst(p,&freedesc);
      /* and free the allocated bandwidth */
      lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet;
      break;

    case RM_IDLE:
      /* tracer stuff */
      trc_logevent(TRC_INTACTIVATION,&p);
      /* similar to RM_task_activate */
      temp = iq_query_timespec(p, &lev->ready);
      TIMESPEC_ASSIGN(&proc_table[p].request_time, temp);
      ADDUSEC2TIMESPEC(lev->period[p], temp);
      proc_table[p].status = RM_READY;
      iq_priority_insert(p,&lev->ready);
      lev->deadline_timer[p] = kern_event_post(temp,
                                               RM_timer_deadline,
                                               (void *)p);
      //printk("(d%d idle priority set to %d)",p,proc_table[p].priority );
      event_need_reschedule();
      printk("el%d|",p);
      break;

    case RM_WAIT:
      /* Without this, the task cannot be reactivated!!! */
      proc_table[p].status = SLEEP;
      break;

    default:
      /* otherwise, a deadline miss occurred!!! */
      kern_printf("timer_deadline:AAARRRGGGHHH!!!");
      kern_raise(XDEADLINE_MISS,p);
  }
}
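
/* Illustrative note (not part of the original module): in the RM_IDLE
   branch above, the next release time and deadline are obtained by adding
   the period (stored in microseconds) to the timespec kept in the ready
   queue.  A minimal sketch of that arithmetic, using only the
   ADDUSEC2TIMESPEC macro already used by this file; the function name and
   the 10 ms period are only examples: */
#if 0
static void example_deadline_arithmetic(void)
{
  struct timespec dl = {0, 0};

  ADDUSEC2TIMESPEC(10000, &dl);   /* dl = 0.010000000 s */
  ADDUSEC2TIMESPEC(10000, &dl);   /* dl = 0.020000000 s */
}
#endif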

static void RM_timer_guest_deadline(void *par)
{
  PID p = (PID) par;

  kern_printf("AAARRRGGGHHH!!!");
  kern_raise(XDEADLINE_MISS,p);
}

static int RM_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
  if (m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l)) {
    HARD_TASK_MODEL *h = (HARD_TASK_MODEL *)m;

    if (h->wcet && h->mit)
      return 0;
  }

  return -1;
}

static int RM_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
  if (m->pclass == JOB_PCLASS || m->pclass == (JOB_PCLASS | l))
    return 0;
  else
    return -1;
}


static char *onoff(int i)
{
  if (i)
    return "On ";
  else
    return "Off";
}

static void RM_level_status(LEVEL l)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  PID p = iq_query_first(&lev->ready);

  kern_printf("Wcet     Check    : %s\n",
            onoff(lev->flags & RM_ENABLE_WCET_CHECK));
  kern_printf("On-line guarantee : %s\n",
            onoff(lev->flags & RM_ENABLE_GUARANTEE));
  kern_printf("Used Bandwidth    : %u/%u\n",
            lev->U, MAX_BANDWIDTH);

  while (p != NIL) {
    if ((proc_table[p].pclass) == JOB_PCLASS)
      kern_printf("Pid: %2d (GUEST)\n", p);
    else
      kern_printf("Pid: %2d Name: %10s %s: %9ld Dline: %9ld.%6ld Stat: %s\n",
              p,
              proc_table[p].name,
              lev->flag[p] & RM_FLAG_SPORADIC ? "MinITime" : "Period  ",
              lev->period[p],
              iq_query_timespec(p, &lev->ready)->tv_sec,
              iq_query_timespec(p, &lev->ready)->tv_nsec/1000,
              RM_status_to_a(proc_table[p].status));
    p = iq_query_next(p, &lev->ready);
  }

  for (p=0; p<MAX_PROC; p++)
    if (proc_table[p].task_level == l && proc_table[p].status != RM_READY
        && proc_table[p].status != FREE )
      kern_printf("Pid: %2d Name: %10s %s: %9ld Dline: %9ld.%6ld Stat: %s\n",
                p,
                proc_table[p].name,
                lev->flag[p] & RM_FLAG_SPORADIC ? "MinITime" : "Period  ",
                lev->period[p],
                iq_query_timespec(p, &lev->ready)->tv_sec,
                iq_query_timespec(p, &lev->ready)->tv_nsec/1000,
                RM_status_to_a(proc_table[p].status));
}

/* The scheduler only gets the first task in the queue */
static PID RM_level_scheduler(LEVEL l)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

/*  {  // print 4 dbg the ready queue
    PID p= lev->ready;
    kern_printf("(s");
    while (p != NIL) {
      kern_printf("%d ",p);
      p = proc_table[p].next;
    }
    kern_printf(") ");
  }
  */
  return iq_query_first(&lev->ready);
}

/* The on-line guarantee is enabled only if the appropriate flag is set... */
static int RM_level_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  if (lev->flags & RM_FAILED_GUARANTEE) {
    *freebandwidth = 0;
    return 0;
  }
  else
    if (*freebandwidth >= lev->U) {
      *freebandwidth -= lev->U;
      return 1;
    }
    else
      return 0;

}
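
/* Illustrative note (not part of the original module): the generic kernel
   is expected to call the level_guarantee() hook of every registered
   level in turn, starting from the whole bandwidth; each level subtracts
   its own utilization, and a 0 return value makes the global guarantee
   fail.  A minimal sketch of such a loop, assuming a hypothetical bound
   sched_levels on the number of registered levels and that levels
   without an on-line guarantee leave the hook set to NULL: */
#if 0
static int example_global_guarantee(void)
{
  bandwidth_t num = MAX_BANDWIDTH;
  LEVEL l;
  int ok = 1;

  for (l = 0; l < sched_levels && ok; l++)
    if (level_table[l]->level_guarantee != NULL)
      ok = level_table[l]->level_guarantee(l, &num);

  return ok;  /* 0 => the task set cannot be guaranteed */
}
#endif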

static int RM_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  /* if RM_task_create is called, then the pclass must be a
     valid pclass. */

  HARD_TASK_MODEL *h = (HARD_TASK_MODEL *)m;

  *iq_query_priority(p, &lev->ready) = lev->period[p] = h->mit;

  if (h->periodicity == APERIODIC)
    lev->flag[p] = RM_FLAG_SPORADIC;
  else
    lev->flag[p] = 0;
  lev->deadline_timer[p] = -1;

  /* Enable wcet check */
  if (lev->flags & RM_ENABLE_WCET_CHECK) {
    proc_table[p].avail_time = h->wcet;
    proc_table[p].wcet       = h->wcet;
    proc_table[p].control |= CONTROL_CAP;
  }

  /* update the bandwidth... */
  if (lev->flags & RM_ENABLE_GUARANTEE) {
    bandwidth_t b;
    b = (MAX_BANDWIDTH / h->mit) * h->wcet;

    /* really update lev->U, checking for an overflow... */
    if (MAX_BANDWIDTH - lev->U > b)
      lev->U += b;
    else
      /* The task can NOT be guaranteed (U > MAX_BANDWIDTH)...
         In this case we don't raise an exception: after RM_task_create,
         task_create will call level_guarantee, which returns -1.
         Returning -1 directly from RM_task_create wouldn't be correct,
         because:
           . in general, the guarantee must be done only after the
             resources have been registered as well
           . returning -1 would make task_create fail with errno
             ETASK_CREATE instead of ENO_GUARANTEE!!!

         Why use the flag? Because if lev->U overflowed and we simply
         clamped it to MAX_BANDWIDTH, we would lose track of the
         bandwidth actually allocated...
      */
      lev->flags |= RM_FAILED_GUARANTEE;
  }

  return 0; /* OK, also if the task cannot be guaranteed... */
}
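
/* Illustrative usage (not part of the original module): a sketch of how
   an application could create a hard periodic task handled by this
   level, assuming the HARD_TASK_MODEL helper macros and the generic
   task_create() primitive provided by the kernel headers; the function
   names and the timing values below are only examples: */
#if 0
void *example_task_body(void *arg);  /* hypothetical task body */

static PID example_create_hard_task(void)
{
  HARD_TASK_MODEL m;
  PID pid;

  hard_task_default_model(m);
  hard_task_def_mit(m, 10000);   /* period / min. interarrival time: 10 ms */
  hard_task_def_wcet(m, 2000);   /* worst case execution time:        2 ms */

  pid = task_create("example", example_task_body, &m, NULL);
  if (pid == NIL)
    kern_printf("task_create failed (the task could not be guaranteed?)\n");

  return pid;
}
#endif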
330
 
331
static void RM_task_detach(LEVEL l, PID p)
332
{
333
  /* the RM level doesn't introduce any dinamic allocated new field.
334
     we have only to reset the NO_GUARANTEE FIELD and decrement the allocated
335
     bandwidth */
336
 
337
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
338
 
339
  if (lev->flags & RM_FAILED_GUARANTEE)
340
    lev->flags &= ~RM_FAILED_GUARANTEE;
341
  else
342
    lev->U -= (MAX_BANDWIDTH / lev->period[p]) * proc_table[p].wcet;
343
}
344
 
345
static int RM_task_eligible(LEVEL l, PID p)
346
{
347
  return 0; /* if the task p is chosen, it is always eligible */
348
}

static void RM_task_dispatch(LEVEL l, PID p, int nostop)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

//  kern_printf("(disp %d)",p);

  /* the task state is set to EXE by the scheduler();
     we extract the task from the ready queue
     NB: we can't assume that p is the first task in the queue!!! */
  iq_extract(p, &lev->ready);
}

static void RM_task_epilogue(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

//  kern_printf("(epil %d)",p);

  /* check whether the wcet is exhausted... */
  if ((lev->flags & RM_ENABLE_WCET_CHECK) && proc_table[p].avail_time <= 0) {
    /* if it is, raise an XWCET_VIOLATION exception */
    kern_raise(XWCET_VIOLATION,p);
    proc_table[p].status = RM_WCET_VIOLATED;
  }
  else {
    /* the task has been preempted; it returns to the ready queue... */
    iq_priority_insert(p,&lev->ready);
    proc_table[p].status = RM_READY;
  }
}

static void RM_task_activate(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  struct timespec *temp;

  if (proc_table[p].status == RM_WAIT) {
    kern_raise(XACTIVATION,p);
    return;
  }

  /* Test if we are trying to activate a non-sleeping task    */
  /* Ignore this; the task is already active                  */
  if (proc_table[p].status != SLEEP &&
      proc_table[p].status != RM_WCET_VIOLATED)
    return;


  /* see also RM_timer_deadline */
  ll_gettime(TIME_EXACT, &proc_table[p].request_time);

  temp = iq_query_timespec(p, &lev->ready);
  TIMESPEC_ASSIGN(temp, &proc_table[p].request_time);
  ADDUSEC2TIMESPEC(lev->period[p], temp);

  /* Insert task in the correct position */
  proc_table[p].status = RM_READY;
  iq_priority_insert(p,&lev->ready);

  /* Set the deadline timer */
  lev->deadline_timer[p] = kern_event_post(temp,
                                           RM_timer_deadline,
                                           (void *)p);
}

static void RM_task_insert(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  /* Similar to RM_task_activate, but we don't check the state of
     the task and we don't set the request_time */

  /* Insert task in the correct position */
  proc_table[p].status = RM_READY;
  iq_priority_insert(p,&lev->ready);
}

static void RM_task_extract(LEVEL l, PID p)
{
  /* Extract the running task from the level
     . we have already extracted it from the ready queue at dispatch time
     . the capacity event has to be removed by the generic kernel
     . the wcet doesn't need any modification...
     . the state of the task is set by the calling function
     . the deadline must remain...

     So, we do nothing!!!
  */
}

static void RM_task_endcycle(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  /* the task has terminated its job before consuming the wcet. All OK! */
  if (lev->flag[p] & RM_FLAG_SPORADIC)
    proc_table[p].status = RM_WAIT;
  else /* periodic task */
    proc_table[p].status = RM_IDLE;

  /* we reset the capacity counters... */
  if (lev->flags & RM_ENABLE_WCET_CHECK)
    proc_table[p].avail_time = proc_table[p].wcet;

  /* when the deadline timer fires, it recognizes the situation and sets
     everything up correctly (reactivation, request_time, etc...) */
}

static void RM_task_end(LEVEL l, PID p)
{
//  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  proc_table[p].status = RM_ZOMBIE;

  /* When the deadline timer fires, it puts the task descriptor back
     into the free queue and frees the allocated bandwidth... */
}

static void RM_task_sleep(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  /* the task has terminated its job before consuming the wcet. All OK! */
  proc_table[p].status = RM_WAIT;

  /* we reset the capacity counters... */
  if (lev->flags & RM_ENABLE_WCET_CHECK)
    proc_table[p].avail_time = proc_table[p].wcet;

  /* when the deadline timer fires, it recognizes the situation and
     correctly sets the task state to sleep... */
}


/* Guest Functions
   These functions manage a JOB_TASK_MODEL, which is used to put
   a guest task in the RM ready queue. */

static int RM_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  JOB_TASK_MODEL *job = (JOB_TASK_MODEL *)m;

  /* if RM_guest_create is called, then the pclass must be a
     valid pclass. */


  *iq_query_timespec(p,&lev->ready) = job->deadline;

  lev->deadline_timer[p] = -1;

  if (job->noraiseexc)
    lev->flag[p] = RM_FLAG_NORAISEEXC;
  else
    lev->flag[p] = 0;

  *iq_query_priority(p, &lev->ready) = lev->period[p] = job->period;

  /* there is no bandwidth guarantee at this level; it is performed
     by the level that inserts guest tasks... */

  return 0; /* OK, also if the task cannot be guaranteed... */
}

static void RM_guest_detach(LEVEL l, PID p)
{
  /* the RM level doesn't introduce any dynamically allocated field.
     No guarantee is performed on guest tasks... so we don't have to
     reset the RM_FAILED_GUARANTEE flag */
}

static void RM_guest_dispatch(LEVEL l, PID p, int nostop)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  /* the task state is set to EXE by the scheduler();
     we extract the task from the ready queue
     NB: we can't assume that p is the first task in the queue!!! */
  iq_extract(p, &lev->ready);
}

static void RM_guest_epilogue(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  /* the task has been preempted; it returns to the ready queue... */
  iq_priority_insert(p,&lev->ready);
  proc_table[p].status = RM_READY;
}

static void RM_guest_activate(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  /* Insert task in the correct position */
  iq_priority_insert(p,&lev->ready);
  proc_table[p].status = RM_READY;

  /* Set the deadline timer */
  if (!(lev->flag[p] & RM_FLAG_NORAISEEXC))
    lev->deadline_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
                                             RM_timer_guest_deadline,
                                             (void *)p);
}

static void RM_guest_insert(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  /* Insert task in the correct position */
  iq_priority_insert(p,&lev->ready);
  proc_table[p].status = RM_READY;
}

static void RM_guest_extract(LEVEL l, PID p)
{
  /* Extract the running task from the level
     . we have already extracted it from the ready queue at dispatch time
     . the state of the task is set by the calling function
     . the deadline must remain...

     So, we do nothing!!!
  */
}

static void RM_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }

static void RM_guest_end(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  //kern_printf("RM_guest_end: dline timer %d\n",lev->deadline_timer[p]);
  if (proc_table[p].status == RM_READY)
  {
    iq_extract(p, &lev->ready);
    //kern_printf("(g_end rdy extr)");
  }

  /* we remove the deadline timer, because the slice is finished */
  if (lev->deadline_timer[p] != NIL) {
//    kern_printf("RM_guest_end: dline timer %d\n",lev->deadline_timer[p]);
    event_delete(lev->deadline_timer[p]);
    lev->deadline_timer[p] = NIL;
  }

}

static void RM_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }



/* Registration functions */

/*+ Registration function:
    int flags                 the init flags ... see rm.h +*/
void RM_register_level(int flags)
{
  LEVEL l;            /* the level that we register */
  RM_level_des *lev;  /* for readability only */
  PID i;              /* a counter */

  printk("RM_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor();

  /* alloc the space needed for the RM_level_des */
  lev = (RM_level_des *)kern_alloc(sizeof(RM_level_des));

  printk("    lev=%d\n",(int)lev);

  /* update the level_table with the new entry */
  level_table[l] = (level_des *)lev;

  /* fill the standard descriptor */
  strncpy(lev->l.level_name,  RM_LEVELNAME, MAX_LEVELNAME);
  lev->l.level_code               = RM_LEVEL_CODE;
  lev->l.level_version            = RM_LEVEL_VERSION;

  lev->l.level_accept_task_model  = RM_level_accept_task_model;
  lev->l.level_accept_guest_model = RM_level_accept_guest_model;
  lev->l.level_status             = RM_level_status;
  lev->l.level_scheduler          = RM_level_scheduler;

  if (flags & RM_ENABLE_GUARANTEE)
    lev->l.level_guarantee        = RM_level_guarantee;
  else
    lev->l.level_guarantee        = NULL;

  lev->l.task_create              = RM_task_create;
  lev->l.task_detach              = RM_task_detach;
  lev->l.task_eligible            = RM_task_eligible;
  lev->l.task_dispatch            = RM_task_dispatch;
  lev->l.task_epilogue            = RM_task_epilogue;
  lev->l.task_activate            = RM_task_activate;
  lev->l.task_insert              = RM_task_insert;
  lev->l.task_extract             = RM_task_extract;
  lev->l.task_endcycle            = RM_task_endcycle;
  lev->l.task_end                 = RM_task_end;
  lev->l.task_sleep               = RM_task_sleep;

  lev->l.guest_create             = RM_guest_create;
  lev->l.guest_detach             = RM_guest_detach;
  lev->l.guest_dispatch           = RM_guest_dispatch;
  lev->l.guest_epilogue           = RM_guest_epilogue;
  lev->l.guest_activate           = RM_guest_activate;
  lev->l.guest_insert             = RM_guest_insert;
  lev->l.guest_extract            = RM_guest_extract;
  lev->l.guest_endcycle           = RM_guest_endcycle;
  lev->l.guest_end                = RM_guest_end;
  lev->l.guest_sleep              = RM_guest_sleep;

  /* fill the RM descriptor part */
  for(i=0; i<MAX_PROC; i++) {
    lev->period[i]         = 0;
    lev->deadline_timer[i] = -1;
    lev->flag[i]          = 0;
  }

  iq_init(&lev->ready, &freedesc, 0);
  lev->flags = flags & 0x07;
  lev->U     = 0;
}
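
/* Illustrative usage (not part of the original module): the level is
   typically registered once at system initialization, before any task
   is created.  RM_ENABLE_WCET_CHECK and RM_ENABLE_GUARANTEE come from
   rm.h (they are used above); the wrapper function name is only an
   example: */
#if 0
static void example_register_levels(void)
{
  RM_register_level(RM_ENABLE_WCET_CHECK | RM_ENABLE_GUARANTEE);
}
#endif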

bandwidth_t RM_usedbandwidth(LEVEL l)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  if (lev->l.level_code    == RM_LEVEL_CODE &&
      lev->l.level_version == RM_LEVEL_VERSION)
    return lev->U;
  else
    return 0;
}
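
/* Illustrative usage (not part of the original module): the returned
   value can be printed as a fraction of MAX_BANDWIDTH, in the same way
   RM_level_status does above; the function name is only an example: */
#if 0
static void example_print_used_bandwidth(LEVEL rm_level)
{
  bandwidth_t u = RM_usedbandwidth(rm_level);

  kern_printf("RM used bandwidth: %u/%u\n", u, MAX_BANDWIDTH);
}
#endif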