/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors     :
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
 *   Luca Abeni          <luca@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: ss.c,v 1.4 2003-01-07 17:07:51 pj Exp $

 File:        $File$
 Revision:    $Revision: 1.4 $
 Last update: $Date: 2003-01-07 17:07:51 $
 ------------

 This file contains the aperiodic Sporadic Server (SS).

 Note: in the following, server capacity and server budget are used as
       synonyms.

 When scheduling in background, the flags field has the SS_BACKGROUND bit set.

 When scheduling a task because it is pointed to by another task via shadows,
 the task has to be extracted from the wait queue or from the master level. To
 check this we look at the activated field; it is != NIL if a task is
 inserted into the master level. Only one task at a time can be inserted
 into the master level.

 The capacity of the server must be updated
 - when scheduling a task normally
 - when scheduling a task because it is pointed to by a shadow
   but not when scheduling in background.

 When a task is extracted from the system no scheduling has to be done
 until the task reenters the system. To implement this, when a task
 is extracted we block the background scheduling (the scheduling through the
 master level is already blocked because the activated field is not
 reset to NIL) using the SS_BACKGROUNDBLOCK bit.

 nact[p] is -1 if the task set its activations to SKIP, >= 0 otherwise.

 In contrast to the classic SS scheme, the activation happens when a task
 issues a create request while there is positive budget (instead of becoming
 active when there is a running task with priority higher than or equal to
 the server's).
 So the replenish time is established at task arrival time. The replenish time
 is calculated as usual: activation time + server period.
 When the server exhausts its budget, it becomes not active until a
 replenishment occurs.

 When a task ends its computation and there are no tasks to schedule or,
 again, the server budget ends, a replenish amount is posted so that, when
 the replenish time fires, the server budget will be updated. The replenish
 amount is determined by how much time the tasks have run.
 The replenish amount does not take into account periods during which tasks
 handled by SS are preempted.

 There are two models used to handle a task that is running inside a critical
 section (owning a mutex): the "nostop" model and the "stop" model.
 Using the "nostop" model, a task that runs inside a critical section is not
 stopped when the server ends its budget. This is done so that higher priority
 tasks waiting for the mutex are not blocked until the next replenish time.
 When this happens the server capacity becomes negative and the replenish
 amount takes the negative budget part into account.
 With the "stop" model the running task is always suspended when the server
 budget ends. If the suspended task owns a mutex shared with a higher priority
 task, the latter cannot run until the mutex is released; the higher priority
 task must wait at least until the next replenish time, when the server budget
 is refilled and the suspended task runs again.

 Using the "nostop" model, SS can use more bandwidth than the assigned
 capacity (due to negative budgets). So, when computing the guarantee, the
 longest critical section among all tasks handled by SS must be considered.

 SS can be used either with an EDF or an RM master level.

 Read SS.h for further details.

**/
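
/*
 Worked example (illustrative only; the numbers are assumptions, not taken
 from the module): suppose Cs = 2000 us and period = 10000 us.  An aperiodic
 task arrives at t = 3000 us while the server is idle, so the replenish time
 is set to arrival + period = 13000 us.  If the task then runs for 1500 us
 and the wait queue becomes empty, a replenish amount of 1500 us is posted;
 when the replenish timer fires at t = 13000 us, availCs goes back from
 500 us to 2000 us (and is in any case clamped to Cs).
*/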

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */


#include <stdlib.h>
#include <modules/ss.h>
#include <ll/stdio.h>
#include <ll/string.h>

#include <ll/sys/ll/event.h>

#include <kernel/const.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <kernel/trace.h>

/* For debugging purposes */
//#define DEBUG 1

/*+ Status used in the level +*/
#define SS_WAIT          APER_STATUS_BASE    /*+ waiting the service   +*/

/*+ Some useful macros +*/
#define BACKGROUND_ON  (lev->flags & SS_BACKGROUND)

extern struct event *firstevent;

/*+ the level redefinition for the Sporadic Server +*/
typedef struct {
  level_des l;     /*+ the standard level descriptor          +*/

  /* The wcet is stored in the task descriptor's priority
     field, so no other fields are needed                      */

  int nact[MAX_PROC]; /*+ number of pending activations       +*/

  struct timespec lastdline; /*+ the last deadline assigned to
                                 an SS task                   +*/

  int Cs;          /*+ server capacity                        +*/
  int availCs;     /*+ server avail time                      +*/
  int period;      /*+ Server period +*/

  bandwidth_t U;   /*+ the bandwidth used by the server       +*/

  IQUEUE wait;     /*+ the wait queue of the SS               +*/
  PID activated;   /*+ the task inserted in another queue     +*/

  int flags;       /*+ the init flags...                      +*/


  LEVEL scheduling_level;

  int replenishment[SS_MAX_REPLENISH]; /*+ contains replenish amounts +*/
  int rfirst,rlast;                    /*+ first and last valid replenish
                                            in replenish queue +*/
  int rcount;                           /*+ queued replenishments +*/

  int replenish_amount;            /*+ partial replenishments before post +*/
  ss_status server_active;         /*+ Is the server active? +*/

} SS_level_des;

/*+ function prototypes +*/
void SS_internal_status(LEVEL l);
static void SS_replenish_timer(void *arg);
/*-------------------------------------------------------------------*/

/*** Utility functions ***/


/* These are for the dynamic queue. **Disabled** */
#if 0
/* These routines are not tested, be careful */

/*+ SS local memory allocator.
    Can be used for performance optimization.
    The interface is the same as kern_alloc() +*/
void inline * ss_alloc(DWORD b) {
        /* Now simply wraps to the standard kernel alloc */
        return kern_alloc(b);
}

void ssq_inslast(LEVEL l, replenishq *elem) {

        SS_level_des *lev = (SS_level_des *) level_table[l];

        elem->next = NULL;      /* also for the single-element case */
        if(lev->rqueue_last == NULL) { /* empty queue */
                lev->rqueue_last=elem;
                lev->rqueue_first=elem;
                return;
        }
        lev->rqueue_last->next = elem;
        lev->rqueue_last = elem;
}

replenishq *ssq_getfirst(LEVEL l) {

        SS_level_des *lev = (SS_level_des *) level_table[l];
        replenishq *tmp;

        if(lev->rqueue_first == NULL) { /* empty queue */
                return NULL;
        }
        tmp = lev->rqueue_first;
        lev->rqueue_first = tmp->next;
        if(lev->rqueue_first == NULL) { /* no more elements */
                lev->rqueue_last = NULL;
        }
        tmp->next = NULL;       /* to remove dangling pointer */
        return tmp;
}
#endif

/* For the queue implemented with an array;
   SS_MAX_REPLENISH is the assumed array size */

/*+ Insert an element at the tail of the replenish queue
        LEVEL l                 module level
        int   amount            element to insert

        RETURNS:

          NIL   no more space for insertion +*/
static inline int ssq_inslast (LEVEL l, int amount) {

  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  #ifdef DEBUG
  kern_printf("insl ");
  #endif

  if (lev->rcount == SS_MAX_REPLENISH) {
    return NIL; /* no more space in the queue */
  }

  lev->replenishment[lev->rlast++] = amount;
  lev->rlast %= SS_MAX_REPLENISH;
  lev->rcount++;
  #ifdef DEBUG
  printf_xy(0,0,WHITE,"%d",lev->rcount);
  #endif

  return 0;
}

/*+ Get the first element from the replenish queue
        LEVEL l         module level

        RETURNS:
          extracted element
          NIL on empty queue +*/
static inline int ssq_getfirst (LEVEL l) {

  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  int tmp;

  #ifdef DEBUG
  kern_printf("getf ");
  #endif

  if (lev->rcount == 0) {
    return NIL; /* empty queue */
  }
  tmp = lev->replenishment[lev->rfirst++];
  lev->rfirst %= SS_MAX_REPLENISH;
  lev->rcount--;
  #ifdef DEBUG
  printf_xy(0,0,WHITE,"%d",lev->rcount);
  #endif
  return tmp;
}

/*+ Enquire for an empty queue
        LEVEL l         module level

        RETURNS:

          1     queue is empty +*/
static inline int ssq_isempty (LEVEL l) {

  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  return !(lev->rcount);

//  if(lev->rcount == 0)
//    return 1;
//  return 0;
}
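
/* Usage sketch (illustrative only, not compiled): how the module drives the
   circular replenish queue above; the 1500 us value is an arbitrary example. */
#if 0
static void ssq_usage_example(LEVEL l)
{
  int amount;

  if (ssq_inslast(l, 1500) == NIL)        /* post a 1500 us replenishment */
    kern_printf("SS: replenish queue full\n");

  if (!ssq_isempty(l))
    amount = ssq_getfirst(l);             /* consume it in FIFO order     */
}
#endif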

/*+ Set replenish amount for budget used during task execution
        LEVEL l         module level */
static inline void SS_set_ra(LEVEL l)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  /* replenish must be set when the server is still active */
  if(lev->server_active == SS_SERVER_ACTIVE) {
    lev->server_active = SS_SERVER_NOTACTIVE;
    if(ssq_inslast(l, lev->replenish_amount) == NIL) {
      kern_printf("SS: no more space to post replenishment\n");
      kern_printf("You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
      SS_internal_status(l);
      kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
      #ifdef DEBUG
      sys_abort(-1);
      exit(-1);
      #endif
    }
    lev->replenish_amount = 0;
  }
  else {
    kern_printf("SS not active when posting R.A.\n");
    SS_internal_status(l);
    kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
    #ifdef DEBUG
    sys_abort(-1);
    exit(-1);
    #endif
  }
}
/* ------------------------------------------------------------------ */

/* This static function activates the task pointed to by lev->activated */
static inline void SS_activation(SS_level_des *lev)
{
    /* these two variables are only for readability */
    PID   p;
    LEVEL m;

    JOB_TASK_MODEL j;          /* the guest model */
//    struct timespec ty;

    #ifdef DEBUG
    kern_printf("SS_acti ");
    #endif

    p = lev->activated;
    m = lev->scheduling_level;

#if 0
    /* if server is active, replenish time already set */
    if (lev->server_active == SS_SERVER_NOTACTIVE) {
       lev->server_active = SS_SERVER_ACTIVE;
       /* set replenish time */
       TIMESPEC_ASSIGN(&ty, &proc_table[p].request_time);
       ADDUSEC2TIMESPEC(lev->period, &ty);
       TIMESPEC_ASSIGN(&lev->lastdline, &ty);
       #ifdef DEBUG
       kern_printf("RT:%d.%d ",ty.tv_sec,ty.tv_nsec);
       #endif
       kern_event_post(&ty, SS_replenish_timer, (void *) l);
    }
#endif

    job_task_default_model(j,lev->lastdline);
    job_task_def_period(j,lev->period);
    level_table[m]->private_insert(m,p,(TASK_MODEL *)&j);

    #ifdef DEBUG
    kern_printf("PID:%d lastdl:%d.%d ",p,lev->lastdline.tv_sec,lev->lastdline.tv_nsec);
    #endif
}

/*+
    Before calling capacity_timer, update the server capacity
    and the replenish amount.
+*/
static void SS_capacity_timer(void *arg) {

        LEVEL l = (LEVEL)arg;
        SS_level_des *lev = (SS_level_des *)(level_table[l]);
        struct timespec ty;
        int tx;

        #ifdef DEBUG
        kern_printf("SS_captim ");
        #endif

        /* set the replenish amount */
        /* the task was running when the budget ended */
        lev->server_active = SS_SERVER_NOTACTIVE;
        SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
        tx = TIMESPEC2USEC(&ty);
        lev->availCs -= tx;
        if(ssq_inslast(l, tx+lev->replenish_amount) == NIL) {
           kern_printf("SS: no more space to post replenishment\n");
           kern_printf("    You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
           SS_internal_status(l);
           kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
           #ifdef DEBUG
           sys_abort(-1);
           exit(-1);
           #endif
        }
        lev->replenish_amount = 0;
        capacity_timer(NULL);
}

static void SS_replenish_timer(void *arg)
{
  LEVEL l = (LEVEL)arg;
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec ty;
  int amount;

    #ifdef DEBUG
    kern_printf("SS_reptim ");
    #endif

  /* availCs may be <0 because a task executed via a shadow for a long time.
     lev->activated == NIL only if the previous task has finished and there
     was no other task to be put in the ready queue
     ... we are now activating the next task */
  if ((amount = ssq_getfirst(l)) != NIL) {
    lev->availCs += amount;
    #ifdef DEBUG
    kern_printf("AvaCs=%d ",lev->availCs);
    #endif
    if (lev->availCs > lev->Cs) {
      /* This should not be possible. I do so for robustness. */
      lev->availCs = lev->Cs;
      #ifdef DEBUG
      kern_printf("SS warning: budget higher than server capacity. Set to Cs.");
      #endif
    }
    if (lev->availCs <= 0) {
      /* we can be here if the nostop model is used */
      #ifdef DEBUG
      kern_printf("WARNING: SS has non positive capacity after replenish.");
      #endif
      /* if there is no pending replenishment and the server
         is not active we must refill somehow.
         Otherwise SS remains not active forever */
      if(ssq_isempty(l) && lev->server_active == SS_SERVER_NOTACTIVE) {
        lev->availCs = lev->Cs;
        kern_printf("SS was fully replenished due to irreversible non positive budget!!!\n");
        kern_printf("You should review your time estimation for critical sections ;)\n");
      }
    }
  }
  else {
    /* replenish queue is empty */
    kern_printf("Replenish Timer fires but no Replenish Amount defined\n");
    SS_internal_status(l);
    kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
    #ifdef DEBUG
    sys_abort(-1);
    exit(-1);
    #endif
  }

  if (lev->availCs > 0 && lev->activated == NIL) {
    if (iq_query_first(&lev->wait) != NIL) {
      lev->activated = iq_getfirst(&lev->wait);
      /* if server is active, replenish time already set */
      if (lev->server_active == SS_SERVER_NOTACTIVE) {
         lev->server_active = SS_SERVER_ACTIVE;
         /* set replenish time */
         kern_gettime(&ty);
         ADDUSEC2TIMESPEC(lev->period, &ty);
         TIMESPEC_ASSIGN(&lev->lastdline, &ty);
         #ifdef DEBUG
         kern_printf("RT:%d.%d ",ty.tv_sec,ty.tv_nsec);
         #endif
         kern_event_post(&ty, SS_replenish_timer, (void *) l);
      }
      SS_activation(lev);
      event_need_reschedule();
    }
  }
}

static char *SS_status_to_a(WORD status)
{
  if (status < MODULE_STATUS_BASE)
    return "Unavailable"; //status_to_a(status);

  switch (status) {
    case SS_WAIT         : return "SS_Wait";
    default              : return "SS_Unknown";
  }
}


/*-------------------------------------------------------------------*/

/*** Level functions ***/

void SS_internal_status(LEVEL l)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  PID p = iq_query_first(&lev->wait);

  kern_printf("On-line guarantee : %s\n",
    (lev->flags & SS_ENABLE_GUARANTEE_EDF ||
     lev->flags & SS_ENABLE_GUARANTEE_RM  )?"On":"Off");

  kern_printf("Used Bandwidth    : %u/%u\n",lev->U,MAX_BANDWIDTH);
  kern_printf("Period            : %d\n",lev->period);
  kern_printf("Capacity          : %d\n",lev->Cs);
  kern_printf("Avail capacity    : %d\n",lev->availCs);
  kern_printf("Server is %sactive\n",
     (lev->server_active == SS_SERVER_NOTACTIVE ? "not ":""));
  kern_printf("Pending RAs       : %d\n",lev->rcount);

  if (lev->activated != NIL)
    kern_printf("Activated: Pid: %d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n",
                lev->activated,
                proc_table[lev->activated].name,
                iq_query_timespec(lev->activated,&lev->wait)->tv_sec,
                iq_query_timespec(lev->activated,&lev->wait)->tv_nsec,
                lev->nact[lev->activated],
                SS_status_to_a(proc_table[lev->activated].status));

  while (p != NIL) {
    kern_printf("Pid: %d\tName: %10s\tStatus: %s\n",
                p,
                proc_table[p].name,
                SS_status_to_a(proc_table[p].status));
    p = iq_query_next(p, &lev->wait);
  }
}

static PID SS_public_schedulerbackground(LEVEL l)
{
  /* the SS catches background time to execute aperiodic activities */
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  #ifdef DEBUG
  kern_printf("SS_levschbg ");
  #endif

  lev->flags |= SS_BACKGROUND;

  if (lev->flags & SS_BACKGROUND_BLOCK)
    return NIL;
  else
    return iq_query_first(&lev->wait);
}

/* The on-line guarantee is enabled only if the appropriate flag is set... */
static int SS_public_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  #ifdef DEBUG
  kern_printf("SS_levguarEDF ");
  #endif

  if (*freebandwidth >= lev->U) {
    *freebandwidth -= lev->U;
    return 1;
  }
  else
    return 0;
}

static int SS_public_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  #ifdef DEBUG
  kern_printf("SS_levguarRM ");
  #endif

  if (*freebandwidth > lev->U + RM_MINFREEBANDWIDTH) {
    *freebandwidth -= lev->U;
    return 1;
  }
  else
    return 0;
}
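
/*
 Worked example (illustrative only; the numbers are assumptions): with
 Cs = 2000 us and per = 10000 us, SS_register_level() below computes
 U = (MAX_BANDWIDTH / 10000) * 2000, i.e. 20% of MAX_BANDWIDTH.  The EDF
 test above accepts the server when *freebandwidth >= U, while the RM test
 additionally keeps RM_MINFREEBANDWIDTH free
 (*freebandwidth > U + RM_MINFREEBANDWIDTH).
*/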

/*-------------------------------------------------------------------*/

/***  Task functions  ***/


static int SS_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  SOFT_TASK_MODEL *s;

  #ifdef DEBUG
  kern_printf("SS_taskcre ");
  #endif

  if (m->pclass != SOFT_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;
  s = (SOFT_TASK_MODEL *)m;
  if (s->periodicity != APERIODIC) return -1;

  if (s->arrivals == SAVE_ARRIVALS)
    lev->nact[p] = 0;
  else
    lev->nact[p] = -1;

  return 0; /* OK, even if the task cannot be guaranteed */
}

static void SS_public_dispatch(LEVEL l, PID p, int nostop)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec ty;

  #ifdef DEBUG
  kern_printf("SS_tdi ");
  #endif

  TIMESPEC_ASSIGN(&ty, &schedule_time);
  /* set replenish time */
  if(!BACKGROUND_ON) {
     if(lev->server_active == SS_SERVER_NOTACTIVE) {
        lev->server_active = SS_SERVER_ACTIVE;
        ADDUSEC2TIMESPEC(lev->period,&ty);
        TIMESPEC_ASSIGN(&lev->lastdline, &ty);
        #ifdef DEBUG
        kern_printf("tdiPID:%d RT:%d.%d ",p,ty.tv_sec,ty.tv_nsec);
        #endif
        kern_event_post(&ty, SS_replenish_timer,(void *) l);
     }
  }

  #ifdef DEBUG
  if (nostop) kern_printf("NOSTOP!!! ");
  #endif

  /* there is at least one task ready inserted in an RM or similar level.
     Note that we can't check the status because the scheduler sets it
     to exe before calling task_dispatch.
     We have to check lev->activated != p instead */
  if (lev->activated != p) {
    iq_extract(p, &lev->wait);
    #ifdef DEBUG
    kern_printf("extr task:%d ",p);
    #endif
  }
  else {
    #ifdef DEBUG
    if (nostop) kern_printf("(gd status=%d)",proc_table[p].status);
    #endif
    level_table[lev->scheduling_level]->
       private_dispatch(lev->scheduling_level,p,nostop);
  }

  /* set capacity timer */
  if (!nostop && !BACKGROUND_ON) {
    TIMESPEC_ASSIGN(&ty, &schedule_time);
//    kern_printf("ty:%d.%d ",ty.tv_sec,ty.tv_nsec);
    ADDUSEC2TIMESPEC((lev->availCs<=0 ? 0:lev->availCs),&ty);
//    kern_printf("avCs:%d ty:%d.%d ",lev->availCs,ty.tv_sec,ty.tv_nsec);
    /* stop the task if budget ends */
    #ifdef DEBUG
    kern_printf("PID:%d ST=%d.%d  ",p,ty.tv_sec,ty.tv_nsec);
    #endif
    cap_timer = kern_event_post(&ty, SS_capacity_timer,(void *) l);
  }
}

static void SS_public_epilogue(LEVEL l, PID p) {

  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec ty;
  int tx;

  #ifdef DEBUG
  kern_printf("SS_tep ");
  #endif

  /* update the server capacity */
  if (BACKGROUND_ON)
    lev->flags &= ~SS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
//    kern_printf("ty:%d.%d ",ty.tv_sec,ty.tv_nsec);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
//    kern_printf("avCs:%d ty:%d.%d ",lev->availCs,ty.tv_sec,ty.tv_nsec);
    lev->replenish_amount += tx;
    #ifdef DEBUG
    kern_printf("RA:%d ",lev->replenish_amount);
    #endif
  }

  /* check if the server capacity is finished... */
  if (lev->availCs <= 0) {
    /* The server slice has finished... do the task_end!!!
       A first version of the module used task_endcycle, but it was
       not conceptually correct because the task didn't stop because it
       had finished all its work, but because the server had no budget!
       So, if task_endcycle is called, the task remains in the
       master level, and we can't wake it up if, for example, another
       task points its shadow to it!!! */

    /* set replenish amount */
    if(!(BACKGROUND_ON)) {
      if(lev->server_active == SS_SERVER_ACTIVE) {
        lev->server_active = SS_SERVER_NOTACTIVE;
        if(ssq_inslast(l, lev->replenish_amount) == NIL) {
          kern_printf("SS: no more space to post replenishment\n");
          kern_printf("You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
          SS_internal_status(l);
          kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
          #ifdef DEBUG
          sys_abort(-1);
          exit(-1);
          #endif
        }
        lev->replenish_amount = 0;
      }
    }

    if (lev->activated == p)
      level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p);

    iq_insertfirst(p, &lev->wait);
    proc_table[p].status = SS_WAIT;
    lev->activated = NIL;
  }
  else {
    /* The task has been preempted.
       It returns into the ready queue or into the
       wait queue by calling the private_epilogue... */

    if (lev->activated == p) {  /* goes into ready queue */
      level_table[ lev->scheduling_level ]->
        private_epilogue(lev->scheduling_level,p);
    }
    else {                      /* goes into wait queue */
      iq_insertfirst(p, &lev->wait);
      proc_table[p].status = SS_WAIT;
    }
  }
}

static void SS_public_activate(LEVEL l, PID p)
{
        SS_level_des *lev = (SS_level_des *)(level_table[l]);
        struct timespec ty;

        #ifdef DEBUG
        kern_printf("SS_tacti ");
        #endif

        if (lev->activated == p || proc_table[p].status == SS_WAIT) {
                if (lev->nact[p] != -1) lev->nact[p]++;
        }
        else if (proc_table[p].status == SLEEP) {
                if (lev->activated == NIL && lev->availCs > 0) {
                  if(!BACKGROUND_ON) {
                    /* if server is active, replenish time already set */
                    if (lev->server_active == SS_SERVER_NOTACTIVE) {
                      lev->server_active = SS_SERVER_ACTIVE;
                      /* set replenish time */
                      kern_gettime(&ty);
                      ADDUSEC2TIMESPEC(lev->period, &ty);
                      TIMESPEC_ASSIGN(&lev->lastdline, &ty);
                      #ifdef DEBUG
                      kern_printf("RT=%d.%d ",ty.tv_sec,ty.tv_nsec);
                      #endif
                      kern_event_post(&ty, SS_replenish_timer, (void *) l);
                    }
                  }
                  lev->activated = p;
                  SS_activation(lev);
                }
                else {
                        iq_insertlast(p, &lev->wait);
                        proc_table[p].status = SS_WAIT;
                }
        }
        else {
                kern_printf("SS_REJ%d %d %d %d ",
                            p,
                            proc_table[p].status,
                            lev->activated,
                            lev->wait.first);
                return;
        }
}

static void SS_public_unblock(LEVEL l, PID p)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  #ifdef DEBUG
  kern_printf("SS_tins ");
  #endif
  lev->flags &= ~SS_BACKGROUND_BLOCK;

  lev->activated = NIL;

  /* when we reinsert the task into the system, the server capacity
     is always 0 because nothing has executed under the SS before... */
  iq_insertfirst(p, &lev->wait);
  proc_table[p].status = SS_WAIT;
}

static void SS_public_block(LEVEL l, PID p)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  #ifdef DEBUG
  kern_printf("SS_textr ");
  #endif

  /* set replenish amount */
  if(!(BACKGROUND_ON)) {
    SS_set_ra(l);
  }

  /* clear the server capacity */
  lev->availCs = 0;

  lev->flags |= SS_BACKGROUND_BLOCK;

  if (lev->activated == p)
    level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p);
}

static int SS_public_message(LEVEL l, PID p, void *m)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec ty;
  int tx;

  #ifdef DEBUG
  kern_printf("SS_tendcy ");
  #endif

  /* update the server capacity */
  if (BACKGROUND_ON)
    lev->flags &= ~SS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
    lev->replenish_amount += tx;
    #ifdef DEBUG
    kern_printf("PID:%d RA=%d ",p,lev->replenish_amount);
    #endif
  }

  if (lev->activated == p)
    level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p);
  else
    iq_extract(p, &lev->wait);

  if (lev->nact[p] > 0) {
    lev->nact[p]--;
    iq_insertlast(p, &lev->wait);
    proc_table[p].status = SS_WAIT;
  }
  else {
    proc_table[p].status = SLEEP;
  }

  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated != NIL) {
    SS_activation(lev);
  }
  else {
    /* No more tasks to schedule; set replenish amount */
    if(!(BACKGROUND_ON)) {
      SS_set_ra(l);
    }
  }

  jet_update_endcycle(); /* Update the Jet data... */
  trc_logevent(TRC_ENDCYCLE,&exec_shadow); /* tracer stuff */

  return 0;
}

static void SS_public_end(LEVEL l, PID p)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec ty;
  int tx;

  #ifdef DEBUG
  kern_printf("SS_tend ");
  #endif

  /* update the server capacity */
  if (BACKGROUND_ON)
    lev->flags &= ~SS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
    lev->replenish_amount += tx;
    #ifdef DEBUG
    kern_printf("PID:%d RA=%d ",p,lev->replenish_amount);
    #endif
  }

  if (lev->activated == p)
    level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p);

  proc_table[p].status = FREE;
  iq_insertfirst(p,&freedesc);

  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated != NIL) {
    SS_activation(lev);
  }
  else {
    if(!(BACKGROUND_ON)){
      /* No more tasks to schedule; set replenish amount */
      SS_set_ra(l);
    }
  }
}

/*-------------------------------------------------------------------*/

/*** Registration functions ***/


/*+ Registration function:
    int flags                 the init flags ... see SS.h +*/
LEVEL SS_register_level(int flags, LEVEL master, int Cs, int per)
{
  LEVEL l;            /* the level that we register */
  SS_level_des *lev;  /* for readability only */
  PID i;              /* a counter */

  /* request an entry in the level_table */
  l = level_alloc_descriptor(sizeof(SS_level_des));

  lev = (SS_level_des *)level_table[l];

  printk("    lev=%d\n",(int)lev);

  /* fill the standard descriptor */

  if (flags & SS_ENABLE_BACKGROUND)
    lev->l.public_scheduler = SS_public_schedulerbackground;

  if (flags & SS_ENABLE_GUARANTEE_EDF)
    lev->l.public_guarantee = SS_public_guaranteeEDF;
  else if (flags & SS_ENABLE_GUARANTEE_RM)
    lev->l.public_guarantee = SS_public_guaranteeRM;
  else
    lev->l.public_guarantee = NULL;

  lev->l.public_create    = SS_public_create;
  lev->l.public_end       = SS_public_end;
  lev->l.public_dispatch  = SS_public_dispatch;
  lev->l.public_epilogue  = SS_public_epilogue;
  lev->l.public_activate  = SS_public_activate;
  lev->l.public_unblock   = SS_public_unblock;
  lev->l.public_block     = SS_public_block;
  lev->l.public_message   = SS_public_message;

  /* fill the SS descriptor part */

  for (i=0; i<MAX_PROC; i++)
     lev->nact[i] = -1;

  lev->Cs = Cs;
  lev->availCs = Cs;

  lev->period = per;

  iq_init(&lev->wait, &freedesc, 0);
  lev->activated = NIL;

  lev->U = (MAX_BANDWIDTH / per) * Cs;

  lev->scheduling_level = master;

  lev->flags = flags & 0x07;

  /* This is superfluous. I do it for robustness */
  for (i=0;i<SS_MAX_REPLENISH;lev->replenishment[i++]=0);

  /* Initialize replenishment stuff */
  lev->rfirst=0;
  lev->rlast=0;
  lev->rcount=0;
  lev->replenish_amount=0;
  lev->server_active=SS_SERVER_NOTACTIVE;

  return l;
}
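
/* Usage sketch (illustrative only, not compiled): registering an SS level on
   top of an EDF master level.  The EDF_register_level() call and the numeric
   values are assumptions and do not belong to this file. */
#if 0
void ss_registration_example(void)
{
  LEVEL edf_level, ss_level;

  edf_level = EDF_register_level(EDF_ENABLE_ALL);

  /* 2000 us of budget every 10000 us, guaranteed with the EDF test */
  ss_level = SS_register_level(SS_ENABLE_BACKGROUND | SS_ENABLE_GUARANTEE_EDF,
                               edf_level,
                               2000,     /* Cs, in microseconds     */
                               10000);   /* period, in microseconds */
}
#endif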

bandwidth_t SS_usedbandwidth(LEVEL l)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  return lev->U;
}

int SS_availCs(LEVEL l) {
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  return lev->availCs;
}