Subversion Repositories shark

Rev

Rev 502 | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2 pj 1
/*
2
 * Project: S.Ha.R.K.
3
 *
4
 * Coordinators:
5
 *   Giorgio Buttazzo    <giorgio@sssup.it>
6
 *   Paolo Gai           <pj@gandalf.sssup.it>
7
 *
8
 * Authors     :
9
 *   Paolo Gai           <pj@gandalf.sssup.it>
10
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
11
 *   Luca Abeni          <luca@gandalf.sssup.it>
12
 *   (see the web pages for full authors list)
13
 *
14
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
15
 *
16
 * http://www.sssup.it
17
 * http://retis.sssup.it
18
 * http://shark.sssup.it
19
 */
20
 
21
/**
22
 ------------
657 anton 23
 CVS :        $Id: rm.c,v 1.9 2004-05-17 15:03:52 anton Exp $
2 pj 24
 
25
 File:        $File$
657 anton 26
 Revision:    $Revision: 1.9 $
27
 Last update: $Date: 2004-05-17 15:03:52 $
2 pj 28
 ------------
29
 
657 anton 30
 This file contains the scheduling module RM (rate/deadline monotonic)
2 pj 31
 
32
 Read rm.h for further details.
33
 
34
**/
35
 
36
/*
38 pj 37
 * Copyright (C) 2000,2002 Paolo Gai
2 pj 38
 *
39
 * This program is free software; you can redistribute it and/or modify
40
 * it under the terms of the GNU General Public License as published by
41
 * the Free Software Foundation; either version 2 of the License, or
42
 * (at your option) any later version.
43
 *
44
 * This program is distributed in the hope that it will be useful,
45
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
46
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
47
 * GNU General Public License for more details.
48
 *
49
 * You should have received a copy of the GNU General Public License
50
 * along with this program; if not, write to the Free Software
51
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
52
 *
53
 */
54
 
55
 
56
#include <modules/rm.h>
57
#include <ll/stdio.h>
58
#include <ll/string.h>
59
#include <kernel/model.h>
60
#include <kernel/descr.h>
61
#include <kernel/var.h>
62
#include <kernel/func.h>
353 giacomo 63
#include <tracer.h>
64
 
657 anton 65
//#define RM_DEBUG
#define rm_printf kern_printf

#ifdef RM_DEBUG
/* Debug-only helpers that format a time value as "sec.usec".
   Each helper keeps its own static buffer so that two different
   helpers can safely appear in the same printf argument list
   (two calls to the SAME helper would still clobber each other). */
char *pnow() {
  static char tbuf[40];
  struct timespec now;
  sys_gettime(&now);
  sprintf(tbuf, "%ld.%06ld", now.tv_sec, now.tv_nsec/1000);
  return tbuf;
}
char *ptime1(struct timespec *t) {
  static char tbuf[40];
  sprintf(tbuf, "%ld.%06ld", t->tv_sec, t->tv_nsec/1000);
  return tbuf;
}
char *ptime2(struct timespec *t) {
  static char tbuf[40];
  sprintf(tbuf, "%ld.%06ld", t->tv_sec, t->tv_nsec/1000);
  return tbuf;
}
#endif
2 pj 88
 
657 anton 89
/* Task states private to this level */
#define RM_READY    MODULE_STATUS_BASE    /* runnable                          */
#define RM_IDLE     MODULE_STATUS_BASE+1  /* idle, waiting for offset/eop      */
#define RM_WAIT     MODULE_STATUS_BASE+2  /* asked to sleep, waiting for eop   */
#define RM_ZOMBIE   MODULE_STATUS_BASE+3  /* terminated, waiting for eop       */

/* Per-task flag bits kept in lev->taskflags[p] */
#define RM_FLAG_SPORADIC    1   /* the task is sporadic (aperiodic model)   */
#define RM_FLAG_SPOR_LATE   2   /* sporadic task that overran its period    */
2 pj 98
 
99
 
657 anton 100
/* the level redefinition for the Earliest Deadline First level      */
101
typedef struct {
102
  level_des l;                 /* standard level descriptor          */
103
  IQUEUE ready;                /* the ready queue                    */
104
  int flags;                   /* level flags                        */
105
  bandwidth_t U;               /* used bandwidth                     */
2 pj 106
 
657 anton 107
  int taskflags[MAX_PROC];     /* task flags                         */
108
  TIME period[MAX_PROC];       /* task period                        */
109
  TIME rdeadline[MAX_PROC];    /* task relative deadlines            */
110
  TIME offset[MAX_PROC];       /* task release offsets               */
111
  struct timespec release[MAX_PROC];   /* release time of the task   */
112
  int dl_timer[MAX_PROC];      /* deadline overrun timer             */
113
  int eop_timer[MAX_PROC];     /* end of period timer                */
114
  int dl_miss[MAX_PROC];       /* deadline miss counter              */
115
  int wcet_miss[MAX_PROC];     /* WCET miss counter                  */
116
  int nact[MAX_PROC];          /* number of pending periodic jobs    */
117
  int nskip[MAX_PROC];         /* number of skipped sporadic jobs    */
118
} RM_level_des;
2 pj 119
 
120
 
657 anton 121
static void RM_timer_endperiod(void *par);
2 pj 122
 
123
 
657 anton 124
/* This function is called when a task misses its deadline */
125
 
2 pj 126
static void RM_timer_deadline(void *par)
127
{
128
  PID p = (PID) par;
129
  RM_level_des *lev;
657 anton 130
  lev = (RM_level_des *)level_table[proc_table[p].task_level];
131
 
132
  TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0);
2 pj 133
 
657 anton 134
  if (lev->flags & RM_ENABLE_DL_EXCEPTION) {
135
    kern_raise(XDEADLINE_MISS,p);
136
  } else {
137
    lev->dl_miss[p]++;
138
  }
139
}
140
 
141
 
142
/* Release (or queue) task, post deadline and endperiod timers.
143
   The release time is stored in lev->release[p]. */
144
 
145
static void RM_intern_release(PID p, RM_level_des *lev)
146
{
147
  struct timespec temp;
148
 
149
  /* post deadline timer */
150
  if (lev->flags & RM_ENABLE_DL_CHECK) {
151
    temp = lev->release[p];
152
    ADDUSEC2TIMESPEC(lev->rdeadline[p], &temp);
153
    lev->dl_timer[p] = kern_event_post(&temp,RM_timer_deadline,(void *)p);
154
  }
155
 
156
  /* release or queue next job */
157
  if (proc_table[p].status == RM_IDLE) {
158
    /* assign deadline, insert task in the ready queue */
159
    proc_table[p].status = RM_READY;
160
    *iq_query_priority(p,&lev->ready) = lev->rdeadline[p];
161
    iq_priority_insert(p,&lev->ready);
162
#ifdef RM_DEBUG
163
    rm_printf("At %s: releasing %s\n", pnow(), proc_table[p].name);
164
#endif
165
    /* reschedule */
166
    event_need_reschedule();
167
  } else {
168
    /* queue */
169
    lev->nact[p]++;
170
  }
171
 
172
  /* increase release time */
173
  ADDUSEC2TIMESPEC(lev->period[p],&lev->release[p]);
174
  /* post end of period timer */
175
  kern_event_post(&lev->release[p],RM_timer_endperiod,(void *)p);
176
 
177
  TRACER_LOGEVENT(FTrace_EVT_task_timer,(unsigned short int)proc_table[p].context,(unsigned int)proc_table[p].task_level);
178
}
179
 
180
 
181
/* First release */
182
 
183
static void RM_timer_offset(void *par)
184
{
185
  PID p = (PID) par;
186
  RM_level_des *lev;
2 pj 187
  lev = (RM_level_des *)level_table[proc_table[p].task_level];
188
 
657 anton 189
  RM_intern_release(p, lev);
190
}
2 pj 191
 
192
 
657 anton 193
/* This function is called at the end of the period */
2 pj 194
 
657 anton 195
static void RM_timer_endperiod(void *par)
196
{
197
  PID p = (PID) par;
198
  RM_level_des *lev;
199
  lev = (RM_level_des *)level_table[proc_table[p].task_level];
200
 
201
  if (proc_table[p].status == RM_ZOMBIE) {
202
    /* put the task in the FREE state */
203
    proc_table[p].status = FREE;
204
    iq_insertfirst(p,&freedesc);
205
    /* free the allocated bandwidth */
206
    lev->U -= (MAX_BANDWIDTH/lev->rdeadline[p]) * proc_table[p].wcet;
207
    return;
2 pj 208
  }
657 anton 209
 
210
  if (!(lev->taskflags[p] & RM_FLAG_SPORADIC)) {
211
    /* if the task is periodic, rerelease it (now or later) */
212
    RM_intern_release(p, lev);
213
  } else {
214
    /* else check if the task is waiting for end of period */
215
    if (proc_table[p].status == RM_WAIT) {
216
      proc_table[p].status = SLEEP;
217
    } else {
218
      /* the task is still busy. mark it as late */
219
      lev->taskflags[p] |= RM_FLAG_SPOR_LATE;
220
    }
221
  }
2 pj 222
}
223
 
657 anton 224
 
225
/* This function is called when a guest task misses its deadline */
226
 
2 pj 227
static void RM_timer_guest_deadline(void *par)
228
{
229
  PID p = (PID) par;
657 anton 230
  TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0);
2 pj 231
  kern_raise(XDEADLINE_MISS,p);
232
}
233
 
234
/* The scheduler only gets the first task in the queue */
38 pj 235
static PID RM_public_scheduler(LEVEL l)
2 pj 236
{
237
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
29 pj 238
  return iq_query_first(&lev->ready);
2 pj 239
}
240
 
241
/* The on-line guarantee is enabled only if the appropriate flag is set... */
38 pj 242
static int RM_public_guarantee(LEVEL l, bandwidth_t *freebandwidth)
2 pj 243
{
244
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
245
 
159 pj 246
  if (*freebandwidth >= lev->U) {
247
    *freebandwidth -= lev->U;
248
    return 1;
2 pj 249
  }
250
  else
159 pj 251
    return 0;
2 pj 252
}
253
 
38 pj 254
/* Task creation hook.
   Accepts only HARD_TASK_MODELs with wcet and mit set and D <= T;
   reserves bandwidth (if the guarantee is enabled), records period,
   deadline, offset and flags.  Returns 0 on success, -1 on refusal. */
static int RM_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  HARD_TASK_MODEL *h;

  /* model validation */
  if (m->pclass != HARD_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;
  h = (HARD_TASK_MODEL *)m;
  if (!h->wcet || !h->mit) return -1;
  if (h->drel > h->mit) return -1;  /* only D <= T supported */

  /* drel == 0 means "deadline equals period" */
  lev->rdeadline[p] = h->drel ? h->drel : h->mit;

  /* bandwidth accounting, with overflow check */
  if (lev->flags & RM_ENABLE_GUARANTEE) {
    bandwidth_t need = (MAX_BANDWIDTH / lev->rdeadline[p]) * h->wcet;

    if (MAX_BANDWIDTH - lev->U > need)
      lev->U += need;
    else
      return -1;
  }

  /* an exception request implies the corresponding check */
  if (lev->flags & RM_ENABLE_WCET_EXCEPTION)
    lev->flags |= RM_ENABLE_WCET_CHECK;
  if (lev->flags & RM_ENABLE_DL_EXCEPTION)
    lev->flags |= RM_ENABLE_DL_CHECK;

  lev->period[p] = h->mit;
  if (lev->rdeadline[p] == lev->period[p]) {
    /* Ensure that D <= T-eps to make dl_timer trigger before rel_timer */
    lev->rdeadline[p] = lev->period[p] - 1;
  }

  lev->taskflags[p] = 0;
  if (h->periodicity == APERIODIC)
    lev->taskflags[p] |= RM_FLAG_SPORADIC;

  /* no timers armed yet */
  lev->dl_timer[p]  = -1;
  lev->eop_timer[p] = -1;

  /* enable the WCET enforcement machinery */
  if (lev->flags & RM_ENABLE_WCET_CHECK) {
    proc_table[p].avail_time = h->wcet;
    proc_table[p].wcet       = h->wcet;
    proc_table[p].control |= CONTROL_CAP; /* turn on measurement */
  }

  lev->offset[p] = h->offset;
  NULL_TIMESPEC(&lev->release[p]);

  return 0; /* OK, also if the task cannot be guaranteed... */
}
318
 
38 pj 319
/* Task detach hook: return the bandwidth reserved at creation
   time (only meaningful when the guarantee is enabled). */
static void RM_public_detach(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  if (lev->flags & RM_ENABLE_GUARANTEE)
    lev->U -= (MAX_BANDWIDTH / lev->rdeadline[p]) * proc_table[p].wcet;
}
327
 
38 pj 328
/* Dispatch hook: the task leaves the ready queue while it runs. */
static void RM_public_dispatch(LEVEL l, PID p, int nostop)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  iq_extract(p, &lev->ready);
}
333
 
38 pj 334
/* Epilogue hook: called when the running task is preempted.
   First enforce the WCET budget (raise or account an overrun),
   then put the task back into the ready queue. */
static void RM_public_epilogue(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  /* WCET budget exhausted? */
  if ((lev->flags & RM_ENABLE_WCET_CHECK) && proc_table[p].avail_time <= 0) {
    TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,(unsigned short int)proc_table[p].context,0);
    if (lev->flags & RM_ENABLE_WCET_EXCEPTION) {
      kern_raise(XWCET_VIOLATION,p);
    } else {
      /* stop the capacity accounting and count the overrun */
      proc_table[p].control &= ~CONTROL_CAP;
      lev->wcet_miss[p]++;
    }
  }

  /* back to the ready queue */
  iq_priority_insert(p,&lev->ready);
  proc_table[p].status = RM_READY;
}
356
 
657 anton 357
/* Activation hook.
   Only a SLEEPing task may be activated; otherwise the activation
   is either an exception or a counted skip.  The release time is
   the activation time plus the task offset: if it is still in the
   future an offset timer is posted, otherwise the job is released
   immediately. */
static void RM_public_activate(LEVEL l, PID p, struct timespec *t)
{
  struct timespec now;
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  kern_gettime(&now);

  /* reject activations of tasks that are not sleeping */
  if (proc_table[p].status != SLEEP) {
    if (lev->flags & RM_ENABLE_ACT_EXCEPTION) {
      /* too frequent or wrongful activation: raise exception */
      kern_raise(XACTIVATION,p);
    } else {
      /* skip the job, but keep count of the skips */
#ifdef RM_DEBUG
      rm_printf("At %s: activation of %s skipped\n", pnow(), proc_table[p].name);
#endif
      lev->nskip[p]++;
    }
    return;
  }

  /* release time = activation time + offset */
  lev->release[p] = *t;
  ADDUSEC2TIMESPEC(lev->offset[p], &lev->release[p]);

  proc_table[p].status = RM_IDLE;

  if (TIMESPEC_A_GT_B(&lev->release[p], &now)) {
    /* the release is in the future: arm the offset timer
       (NOTE(review): this timer id is not stored, so it cannot be
       cancelled later — pre-existing behavior) */
    kern_event_post(&lev->release[p],RM_timer_offset,(void *)p);
  } else {
    /* release immediately */
    RM_intern_release(p, lev);
  }
}
396
 
38 pj 397
/* Unblock hook: the task becomes ready again and re-enters the
   queue at its (monotonic) priority. */
static void RM_public_unblock(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  proc_table[p].status = RM_READY;
  iq_priority_insert(p,&lev->ready);
}
405
 
38 pj 406
/* Block hook: intentionally empty.
   The running task was already extracted from the ready queue at
   dispatch time; the capacity event is removed by the generic
   kernel, the wcet needs no change, the task state is set by the
   caller, and the deadline must stay armed.  Nothing to do here. */
static void RM_public_block(LEVEL l, PID p)
{
}
418
 
38 pj 419
/* Message hook: m == 0 means task_endcycle(), m == 1 means
   task_sleep().  In both cases the WCET budget is re-armed on the
   way out and the end-of-cycle event is traced. */
static int RM_public_message(LEVEL l, PID p, void *m)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  long msg = (long)m;

  if (msg == 0) {
    /* ---- task_endcycle() ---- */
    if (lev->nact[p] == 0) {
      /* no pending jobs: disarm the deadline timer, if armed */
      if (lev->dl_timer[p] != -1) {
        kern_event_delete(lev->dl_timer[p]);
        lev->dl_timer[p] = -1;
      }
      if (lev->taskflags[p] & RM_FLAG_SPORADIC) {
        if (!(lev->taskflags[p] & RM_FLAG_SPOR_LATE)) {
          /* sporadic, on time: wait for the end of period */
          proc_table[p].status = RM_WAIT;
        } else {
          /* sporadic and late: go straight to SLEEP */
          proc_table[p].status = SLEEP;
          lev->taskflags[p] &= ~RM_FLAG_SPOR_LATE;
        }
      } else {
        /* periodic: idle until the next release */
        proc_table[p].status = RM_IDLE;
      }
    } else {
      /* pending activations: release the next job immediately */
      lev->nact[p]--;
      *iq_query_priority(p,&lev->ready) = lev->rdeadline[p];
      iq_priority_insert(p,&lev->ready);
#ifdef RM_DEBUG
      rm_printf("(Late) At %s: releasing %s\n",
         pnow(), proc_table[p].name);
#endif
    }
  } else if (msg == 1) {
    /* ---- task_sleep() ---- */
    /* disarm the deadline timer, if armed */
    if (lev->dl_timer[p] != -1) {
      kern_event_delete(lev->dl_timer[p]);
      lev->dl_timer[p] = -1;
    }
    if (lev->taskflags[p] & RM_FLAG_SPORADIC) {
      if (!(lev->taskflags[p] & RM_FLAG_SPOR_LATE)) {
        /* sporadic, on time: wait for the end of period */
        proc_table[p].status = RM_WAIT;
      } else {
        /* sporadic and late: go straight to SLEEP */
        proc_table[p].status = SLEEP;
        lev->taskflags[p] &= ~RM_FLAG_SPOR_LATE;
      }
    } else if (!(lev->nact[p] > 0)) {
      /* periodic, on time: wait for the end of period */
      proc_table[p].status = RM_WAIT;
    } else {
      /* periodic and late: drop pending activations and SLEEP */
      lev->nact[p] = 0;
      proc_table[p].status = SLEEP;
      /* disarm the end-of-period timer too */
      if (lev->eop_timer[p] != -1) {
        kern_event_delete(lev->eop_timer[p]);
        lev->eop_timer[p] = -1;
      }
    }
  }

  /* re-arm the WCET budget for the next job */
  if (lev->flags & RM_ENABLE_WCET_CHECK)
    proc_table[p].control |= CONTROL_CAP;
  proc_table[p].avail_time = proc_table[p].wcet;
  jet_update_endcycle(); /* Update the Jet data... */
  TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);

  return 0;
}
503
 
38 pj 504
/* Task termination hook.
   Normally the task becomes a ZOMBIE and is freed by the pending
   end-of-period timer; a late sporadic task has no such timer, so
   it is freed immediately. */
static void RM_public_end(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  if (!(lev->taskflags[p] & RM_FLAG_SPOR_LATE)) {
    /* remove the deadline timer (if any) */
    if (lev->dl_timer[p] != -1) {
      kern_event_delete(lev->dl_timer[p]);
      lev->dl_timer[p] = -1;
    }
    /* the descriptor and bandwidth are reclaimed at end of period */
    proc_table[p].status = RM_ZOMBIE;
  } else {
    /* no endperiod timer will be fired, free the task now! */
    proc_table[p].status = FREE;
    iq_insertfirst(p,&freedesc);
    /* FIX: give back the bandwidth only when it was accounted at
       creation time (consistent with RM_public_detach); otherwise
       lev->U, still 0, would underflow */
    if (lev->flags & RM_ENABLE_GUARANTEE) {
      lev->U -= (MAX_BANDWIDTH/lev->rdeadline[p]) * proc_table[p].wcet;
    }
  }
}
523
 
38 pj 524
/* Guest-task insertion hook: accept a JOB_TASK_MODEL from another
   module, queue it at the priority of its period, and (unless
   suppressed) arm a guest-deadline timer at its absolute deadline. */
static void RM_private_insert(LEVEL l, PID p, TASK_MODEL *m)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  JOB_TASK_MODEL *job;

  /* only JOB models addressed to this level are accepted */
  if (m->pclass != JOB_PCLASS || (m->level != 0 && m->level != l)) {
    kern_raise(XINVALID_TASK, p);
    return;
  }
  job = (JOB_TASK_MODEL *)m;

  /* record the absolute deadline and the priority */
  *iq_query_timespec(p, &lev->ready) = job->deadline;
  /* THIS IS QUESTIONABLE!! rel deadline? */
  *iq_query_priority(p, &lev->ready) = lev->period[p] = job->period;

  iq_priority_insert(p, &lev->ready);
  proc_table[p].status = RM_READY;

  lev->dl_timer[p] = -1;

  if (!job->noraiseexc) {
    lev->dl_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
                                       RM_timer_guest_deadline, (void *)p);
  }
}
553
 
38 pj 554
/* Guest dispatch hook.
   The scheduler already set the task state to EXE; here we only
   pull it out of the ready queue.  NB: p is not necessarily the
   first task in the queue! */
static void RM_private_dispatch(LEVEL l, PID p, int nostop)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  iq_extract(p, &lev->ready);
}
563
 
38 pj 564
/* Guest epilogue hook: the preempted guest goes back into the
   ready queue. */
static void RM_private_epilogue(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  iq_priority_insert(p, &lev->ready);
  proc_table[p].status = RM_READY;
}
572
 
38 pj 573
/* Guest extraction hook: remove the task from the ready queue (if
   it is still there) and disarm its guest-deadline timer, since
   the guest's slice at this level is over. */
static void RM_private_extract(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  if (proc_table[p].status == RM_READY)
    iq_extract(p, &lev->ready);

  if (lev->dl_timer[p] != -1) {
    kern_event_delete(lev->dl_timer[p]);
    lev->dl_timer[p] = -1;
  }
}
587
 
588
 
657 anton 589
 
590
/* Registration function:
591
    int flags                 the init flags ... see rm.h */
38 pj 592
LEVEL RM_register_level(int flags)
2 pj 593
{
594
  LEVEL l;            /* the level that we register */
595
  RM_level_des *lev;  /* for readableness only */
596
  PID i;              /* a counter */
597
 
598
  printk("RM_register_level\n");
599
 
600
  /* request an entry in the level_table */
38 pj 601
  l = level_alloc_descriptor(sizeof(RM_level_des));
2 pj 602
 
38 pj 603
  lev = (RM_level_des *)level_table[l];
2 pj 604
 
605
  /* fill the standard descriptor */
38 pj 606
  lev->l.private_insert   = RM_private_insert;
607
  lev->l.private_extract  = RM_private_extract;
608
  lev->l.private_dispatch = RM_private_dispatch;
609
  lev->l.private_epilogue = RM_private_epilogue;
2 pj 610
 
38 pj 611
  lev->l.public_scheduler = RM_public_scheduler;
2 pj 612
  if (flags & RM_ENABLE_GUARANTEE)
38 pj 613
    lev->l.public_guarantee = RM_public_guarantee;
2 pj 614
  else
38 pj 615
    lev->l.public_guarantee = NULL;
2 pj 616
 
38 pj 617
  lev->l.public_create    = RM_public_create;
618
  lev->l.public_detach    = RM_public_detach;
619
  lev->l.public_end       = RM_public_end;
620
  lev->l.public_dispatch  = RM_public_dispatch;
621
  lev->l.public_epilogue  = RM_public_epilogue;
622
  lev->l.public_activate  = RM_public_activate;
623
  lev->l.public_unblock   = RM_public_unblock;
624
  lev->l.public_block     = RM_public_block;
625
  lev->l.public_message   = RM_public_message;
2 pj 626
 
627
  /* fill the RM descriptor part */
628
  for(i=0; i<MAX_PROC; i++) {
657 anton 629
    lev->period[i]    = 0;
630
    lev->dl_timer[i]  = -1;
631
    lev->taskflags[i] = 0;
632
    lev->dl_miss[i]   = 0;
633
    lev->wcet_miss[i] = 0;
634
    lev->nact[i]      = 0;
635
    lev->nskip[i]     = 0;
2 pj 636
  }
637
 
29 pj 638
  iq_init(&lev->ready, &freedesc, 0);
159 pj 639
  lev->flags = flags;
2 pj 640
  lev->U     = 0;
38 pj 641
 
642
  return l;
2 pj 643
}
644
 
645
/* Return the bandwidth currently allocated by level l. */
bandwidth_t RM_usedbandwidth(LEVEL l)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  return lev->U;
}
651
 
657 anton 652
/* Return the number of pending (queued) activations of task p. */
int RM_get_nact(PID p)
{
  RM_level_des *lev =
    (RM_level_des *)(level_table[proc_table[p].task_level]);

  return lev->nact[p];
}
659
 
660
/* Return how many deadlines task p has missed so far. */
int RM_get_dl_miss(PID p)
{
  RM_level_des *lev =
    (RM_level_des *)(level_table[proc_table[p].task_level]);

  return lev->dl_miss[p];
}
667
 
668
/* Return how many times task p has overrun its WCET budget. */
int RM_get_wcet_miss(PID p)
{
  RM_level_des *lev =
    (RM_level_des *)(level_table[proc_table[p].task_level]);

  return lev->wcet_miss[p];
}
675
 
676
/* Return how many sporadic activations of task p were skipped. */
int RM_get_nskip(PID p)
{
  RM_level_des *lev =
    (RM_level_des *)(level_table[proc_table[p].task_level]);

  return lev->nskip[p];
}
683