/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors     :
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
 *   Luca Abeni          <luca@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */
 
/**
 ------------
 CVS :        $Id: ss.c,v 1.1 2005-02-25 10:55:09 pj Exp $
 
 File:        $File$
 Revision:    $Revision: 1.1 $
 Last update: $Date: 2005-02-25 10:55:09 $
 ------------
 
 This file contains the aperiodic Sporadic Server (SS).
 
 Note: in the following, server capacity and server budget are used as
       synonyms.
 
 When scheduling in background, the flags field has the SS_BACKGROUND bit set.
 
 When scheduling a task because it is pointed to by another task via shadows,
 the task has to be extracted from the wait queue or from the master level. To
 check this we look at the activated field; it is != NIL if a task
 is inserted into the master level. Only one task at a time can be inserted
 into the master level.
 
 The capacity of the server must be updated
 - when scheduling a task normally
 - when scheduling a task because it is pointed to by a shadow
   but not when scheduling in background.
 
 When a task is extracted from the system, no scheduling has to be done
 until the task re-enters the system. To implement this, when a task
 is extracted we block background scheduling (scheduling through the
 master level is already blocked because the activated field is not
 reset to NIL) using the SS_BACKGROUND_BLOCK bit.
 
 nact[p] is -1 if the task set the activations to SKIP, >= 0 otherwise.
 
 In contrast to the classic SS scheme, activation happens when
 a task issues a create request while there is positive budget (instead of
 becoming active when there is a running task with priority higher than or
 equal to the server's).
 So the replenishment time is established at task arrival time. It
 is calculated as usual: activation time + server period.
 When the server exhausts its budget, it stays inactive until a replenishment
 occurs.
 
 When a task ends its computation and there are no tasks to schedule or,
 again, the server budget is exhausted, a replenishment amount is posted so
 that, when the replenishment time fires, the server budget will be updated.
 The replenishment amount is determined by how long the tasks have run.
 It does not take into account periods during which the tasks
 handled by SS are preempted.
 
 There are two models used to handle a task that is running inside a critical
 section (owning a mutex): the "nostop" model and the "stop" model.
 Using the "nostop" model, a task that runs inside a critical section is not
 stopped when the server exhausts its budget. This is done so that higher
 priority tasks waiting for the mutex are not blocked until the replenishment
 time occurs. When this happens the server capacity becomes negative and the
 replenishment amount takes the negative part of the budget into account.
 With the "stop" model, the running task is always suspended when the server
 budget ends. If the suspended task owns a mutex shared with a higher
 priority task, the latter cannot run until the mutex is released: it
 must wait at least until the next replenishment time, when the server
 budget is refilled and the suspended task runs again.
 
 Using the "nostop" model, SS can use more bandwidth than its assigned
 capacity (due to negative budgets). So, when computing the guarantee, the
 longest critical section among all tasks handled by SS must be considered.
 
 SS can be used with either an EDF or an RM master level.
 
 Read SS.h for further details.
 
**/
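/*
 * Worked example of the replenishment rule above (the numbers are
 * illustrative, not taken from this module): assume Cs = 2000 usec and
 * period = 100000 usec.  If an aperiodic request arrives at t = 10000 usec
 * while the budget is positive, the server becomes active and the
 * replenishment time is set to t + period = 110000 usec.  If the served
 * task then runs for 1500 usec before blocking, availCs drops to 500 usec
 * and a replenishment amount of 1500 usec is posted; when the replenishment
 * timer fires at t = 110000 usec, availCs is increased by 1500 usec (and
 * clipped to Cs if it would exceed it).
 */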
 
/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
 
 
#include <stdlib.h>
#include <ss/ss/ss.h>
#include <arch/stdio.h>
#include <arch/string.h>
 
#include <ll/sys/ll/event.h>
 
#include <kernel/const.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
/* For debugging purposes */
//#define DEBUG 1
 
/*+ Status used in the level +*/
#define SS_WAIT          APER_STATUS_BASE    /*+ waiting for the service +*/
 
/*+ Some useful macros +*/
#define BACKGROUND_ON  (lev->flags & SS_BACKGROUND)
 
extern struct event *firstevent;
 
/*+ the level redefinition for the Sporadic Server +*/
typedef struct {
  level_des l;     /*+ the standard level descriptor          +*/
 
  /* The wcet are stored in the task descriptor's priority
     field, so no other fields are needed                      */
 
  int nact[MAX_PROC]; /*+ number of pending activations       +*/
 
  struct timespec lastdline; /*+ the last deadline assigned to
                                 a SS task                    +*/
 
  int Cs;          /*+ server capacity                        +*/
  int availCs;     /*+ server avail time                      +*/
  int period;      /*+ server period                          +*/
 
  bandwidth_t U;   /*+ the bandwidth used by the server       +*/
 
  IQUEUE wait;     /*+ the wait queue of the SS               +*/
  PID activated;   /*+ the task inserted in another queue     +*/
 
  int flags;       /*+ the init flags...                      +*/
 
 
  LEVEL scheduling_level;
 
  int replenishment[SS_MAX_REPLENISH]; /*+ contains replenish amounts +*/
  int rfirst,rlast;                    /*+ first and last valid replenish
                                            in replenish queue +*/
  int rcount;                          /*+ queued replenishments +*/
 
  int replenish_amount;            /*+ partial replenishments before post +*/
  ss_status server_active;         /*+ Is the server active? +*/
 
} SS_level_des;
 
/*+ function prototypes +*/
void SS_internal_status(LEVEL l);
static void SS_replenish_timer(void *arg);
/*-------------------------------------------------------------------*/
 
/*** Utility functions ***/
 
 
/* These are for a dynamic queue. **Disabled** */
#if 0
/* These routines are not tested, be careful */
 
/*+ SS local memory allocator.
    Can be used for performance optimization.
    The interface is the same as kern_alloc() +*/
inline void * ss_alloc(DWORD b) {
        /* Now simply wraps to the standard kernel alloc */
        return kern_alloc(b);
}
 
void ssq_inslast(LEVEL l, replenishq *elem) {
 
        SS_level_des *lev = (SS_level_des *) level_table[l];
 
        elem->next = NULL;
        if(lev->rqueue_last == NULL) { /* empty queue */
                lev->rqueue_last=elem;
                lev->rqueue_first=elem;
                return;
        }
        lev->rqueue_last->next = elem;
        lev->rqueue_last = elem;
}
 
replenishq *ssq_getfirst(LEVEL l) {
 
        SS_level_des *lev = (SS_level_des *) level_table[l];
        replenishq *tmp;
 
        if(lev->rqueue_first == NULL) { /* empty queue */
                return NULL;
        }
        tmp = lev->rqueue_first;
        lev->rqueue_first = tmp->next;
        if(lev->rqueue_first == NULL) { /* no more elements */
                lev->rqueue_last = NULL;
        }
        tmp->next = NULL;       /* to remove a dangling pointer */
        return tmp;
}
#endif
 
/* For the queue implemented with an array.
   SS_MAX_REPLENISH array size assumed */
 
/*+ Insert an element at the tail of the replenish queue
        LEVEL l                 module level
        int   amount            element to insert
 
        RETURNS:
          0     success
          NIL   no more space for insertion +*/
static inline int ssq_inslast (LEVEL l, int amount) {
 
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
 
  #ifdef DEBUG
  kern_printf("insl ");
  #endif
 
  if (lev->rcount == SS_MAX_REPLENISH) {
    return NIL; /* no more space in the queue */
  }
 
  lev->replenishment[lev->rlast++] = amount;
  lev->rlast %= SS_MAX_REPLENISH;
  lev->rcount++;
  #ifdef DEBUG
  printf_xy(0,0,WHITE,"%d",lev->rcount);
  #endif
 
  return 0;
}
 
/*+ Get the first element from the replenish queue
        LEVEL l         module level
 
        RETURNS:
          extracted element
          NIL on empty queue +*/
static inline int ssq_getfirst (LEVEL l) {
 
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  int tmp;
 
  #ifdef DEBUG
  kern_printf("getf ");
  #endif
 
  if (lev->rcount == 0) {
    return NIL; /* empty queue */
  }
  tmp = lev->replenishment[lev->rfirst++];
  lev->rfirst %= SS_MAX_REPLENISH;
  lev->rcount--;
  #ifdef DEBUG
  printf_xy(0,0,WHITE,"%d",lev->rcount);
  #endif
  return tmp;
}
 
/*+ Check whether the replenish queue is empty
        LEVEL l         module level
 
        RETURNS:
          1     queue is empty
          0     otherwise +*/
static inline int ssq_isempty (LEVEL l) {
 
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
 
  return !(lev->rcount);
 
//  if(lev->rcount == 0)
//    return 1;
//  return 0;
}
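/* Illustrative trace of the circular replenish queue above (SS_MAX_REPLENISH
   == 4 is only an assumed size for the example): starting from
   rfirst == rlast == rcount == 0, ssq_inslast(l,300) followed by
   ssq_inslast(l,500) leaves rlast == 2 and rcount == 2; a subsequent
   ssq_getfirst(l) returns 300 and leaves rfirst == 1 and rcount == 1. */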
305
 
306
/*+ Set replenish amount for budget used during task execution
307
        LEVEL l         module level */
308
static inline void SS_set_ra(LEVEL l)
309
{
310
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
311
 
312
  /* replenish must be set when the server is still active */
313
  if(lev->server_active == SS_SERVER_ACTIVE) {
314
    lev->server_active = SS_SERVER_NOTACTIVE;
315
    if(ssq_inslast(l, lev->replenish_amount) == NIL) {
316
      kern_printf("SS: no more space to post replenishment\n");
317
      kern_printf("You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
318
      SS_internal_status(l);
319
      kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
320
      #ifdef DEBUG
321
      exit(-1);
322
      #endif
323
    }
324
    lev->replenish_amount = 0;
325
  }
326
  else {
327
    kern_printf("SS not active when posting R.A.\n");
328
    SS_internal_status(l);
329
    kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
330
    #ifdef DEBUG
331
    exit(-1);
332
    #endif
333
  }
334
}
335
/* ------------------------------------------------------------------ */
336
 
337
/* This static function activates the task pointed by lev->activated) */
338
static inline void SS_activation(SS_level_des *lev)
339
{
    /* these two variables are for readability */
341
    PID   p;
342
    LEVEL m;
343
 
344
    JOB_TASK_MODEL j;          /* the guest model */
345
//    struct timespec ty;
346
 
347
    #ifdef DEBUG
348
    kern_printf("SS_acti ");
349
    #endif
350
 
351
    p = lev->activated;
352
    m = lev->scheduling_level;
353
 
354
#if 0
355
    /* if server is active, replenish time already set */
356
    if (lev->server_active == SS_SERVER_NOTACTIVE) {
357
       lev->server_active = SS_SERVER_ACTIVE;
358
       /* set replenish time */
359
       TIMESPEC_ASSIGN(&ty, &proc_table[p].request_time);
360
       ADDUSEC2TIMESPEC(lev->period, &ty);
361
       TIMESPEC_ASSIGN(&lev->lastdline, &ty);
362
       #ifdef DEBUG
363
       kern_printf("RT:%d.%d ",ty.tv_sec,ty.tv_nsec);
364
       #endif
365
       kern_event_post(&ty, SS_replenish_timer, (void *) l);
366
    }
367
#endif
368
 
369
    job_task_default_model(j,lev->lastdline);
370
    job_task_def_period(j,lev->period);
371
    level_table[m]->private_insert(m,p,(TASK_MODEL *)&j);
372
 
373
    #ifdef DEBUG
374
    kern_printf("PID:%p lastdl:%d.%d ",p,lev->lastdline.tv_sec,lev->lastdline.tv_nsec);
375
    #endif
376
}
377
 
378
/*+
    Before calling capacity_timer, update the server capacity
    and the replenish amount.
+*/
382
static void SS_capacity_timer(void *arg) {
383
 
384
        LEVEL l = (LEVEL)arg;
385
        SS_level_des *lev = (SS_level_des *)(level_table[l]);
386
        struct timespec ty;
387
        int tx;
388
 
389
        #ifdef DEBUG
390
        kern_printf("SS_captim ");
391
        #endif
392
 
393
        /* set the replenish amount:
           the task was running when the budget was exhausted */
395
        lev->server_active = SS_SERVER_NOTACTIVE;
396
        SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
397
        tx = TIMESPEC2USEC(&ty);
398
        lev->availCs -= tx;
399
        if(ssq_inslast(l, tx+lev->replenish_amount) == NIL) {
400
           kern_printf("SS: no more space to post replenishment\n");
401
           kern_printf("    You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
402
           SS_internal_status(l);
403
           kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
404
           #ifdef DEBUG
405
           exit(-1);
406
           #endif
407
        }
408
        lev->replenish_amount = 0;
409
        capacity_timer(NULL);
410
}
411
 
412
static void SS_replenish_timer(void *arg)
413
{
414
  LEVEL l = (LEVEL)arg;
415
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
416
  struct timespec ty;
417
  int amount;
418
 
419
    #ifdef DEBUG
420
    kern_printf("SS_reptim ");
421
    #endif
422
 
423
  /* availCs may be < 0 because a task executed via a shadow for a long time.
     lev->activated == NIL only if the previous task has finished and there
     was no other task to be put in the ready queue
     ... we are now activating the next task */
427
  if ((amount = ssq_getfirst(l)) != NIL) {
428
    lev->availCs += amount;
429
    #ifdef DEBUG
430
    kern_printf("AvaCs=%d ",lev->availCs);
431
    #endif
432
    if (lev->availCs > lev->Cs) {
433
      /* This should not be possible. I do so for robustness. */
434
      lev->availCs = lev->Cs;
435
      #ifdef DEBUG
436
      kern_printf("SS warning: budget higher then server capacity. Set to Cs.");
437
      #endif
438
    }
439
    if (lev->availCs <= 0) {
440
      /* we can be here if nostop model is used */
441
      #ifdef DEBUG
442
      kern_printf("WARNING: SS has non positive capacity after replenish.");
443
      #endif
444
      /* if there is no pending replenishment and the server
         is not active we must refill somehow,
         otherwise SS remains inactive forever */
447
      if(ssq_isempty(l) && lev->server_active == SS_SERVER_NOTACTIVE) {
448
        lev->availCs = lev->Cs;
449
        kern_printf("SS was full replenished due to irreversible non positive budget!!!\n");
450
        kern_printf("You should review your time extimation for critical sections ;)\n");
451
      }
452
    }
453
  }
454
  else {
455
    /* replenish queue is empty */
456
    kern_printf("Replenish Timer fires but no Replenish Amount defined\n");
457
    SS_internal_status(l);
458
    kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
459
    #ifdef DEBUG
460
    exit(-1);
461
    #endif
462
  }
463
 
464
  if (lev->availCs > 0 && lev->activated == NIL) {
465
    if (iq_query_first(&lev->wait) != NIL) {
466
      lev->activated = iq_getfirst(&lev->wait);
467
      /* if server is active, replenish time already set */
468
      if (lev->server_active == SS_SERVER_NOTACTIVE) {
469
         lev->server_active = SS_SERVER_ACTIVE;
470
         /* set replenish time */
471
         kern_gettime(&ty);
472
         ADDUSEC2TIMESPEC(lev->period, &ty);
473
         TIMESPEC_ASSIGN(&lev->lastdline, &ty);
474
         #ifdef DEBUG
475
         kern_printf("RT:%d.%d ",ty.tv_sec,ty.tv_nsec);
476
         #endif
477
         kern_event_post(&ty, SS_replenish_timer, (void *) l);
478
      }
479
      SS_activation(lev);
480
      event_need_reschedule();
481
    }
482
  }
483
}
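/* Example of the "nostop" accounting that this handler has to cope with
   (illustrative numbers): a task holding a mutex is not stopped when the
   budget ends, so availCs can become negative, e.g. -300 usec after a
   300 usec overrun; the overrun is added to the posted replenishment
   amount, so the corresponding replenishment compensates for it.  If,
   after a replenishment, availCs is still <= 0 while no further
   replenishment is pending and the server is inactive, the code above
   refills availCs to Cs and prints the warning. */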
484
 
485
static char *SS_status_to_a(WORD status)
486
{
487
  if (status < MODULE_STATUS_BASE)
488
    return "Unavailable"; //status_to_a(status);
489
 
490
  switch (status) {
491
    case SS_WAIT         : return "SS_Wait";
492
    default              : return "SS_Unknown";
493
  }
494
}
495
 
496
 
497
/*-------------------------------------------------------------------*/
498
 
499
/*** Level functions ***/
500
 
501
void SS_internal_status(LEVEL l)
502
{
503
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
504
  PID p = iq_query_first(&lev->wait);
505
 
506
  kern_printf("On-line guarantee : %s\n",
507
    (lev->flags & SS_ENABLE_GUARANTEE_EDF ||
508
     lev->flags & SS_ENABLE_GUARANTEE_RM  )?"On":"Off");
509
 
510
  kern_printf("Used Bandwidth    : %u/%u\n",lev->U,MAX_BANDWIDTH);
511
  kern_printf("Period            : %d\n",lev->period);
512
  kern_printf("Capacity          : %d\n",lev->Cs);
513
  kern_printf("Avail capacity    : %d\n",lev->availCs);
514
  kern_printf("Server is %sactive\n",
515
     (lev->server_active == SS_SERVER_NOTACTIVE ? "not ":""));
516
  kern_printf("Pending RAs       : %d\n",lev->rcount);
517
 
518
  if (lev->activated != NIL)
519
    kern_printf("Activated: Pid: %d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n",
520
                lev->activated,
521
                proc_table[lev->activated].name,
522
                iq_query_timespec(lev->activated,&lev->wait)->tv_sec,
523
                iq_query_timespec(lev->activated,&lev->wait)->tv_nsec,
524
                lev->nact[lev->activated],
525
                SS_status_to_a(proc_table[lev->activated].status));
526
 
527
  while (p != NIL) {
528
    kern_printf("Pid: %d\tName: %10s\tStatus: %s\n",
529
                p,
530
                proc_table[p].name,
531
                SS_status_to_a(proc_table[p].status));
532
    p = iq_query_next(p, &lev->wait);
533
  }
534
}
535
 
536
static PID SS_public_schedulerbackground(LEVEL l)
537
{
538
  /* the SS catches background time to execute aperiodic activities */
539
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
540
 
541
  #ifdef DEBUG
542
  kern_printf("SS_levschbg ");
543
  #endif
544
 
545
  lev->flags |= SS_BACKGROUND;
546
 
547
  if (lev->flags & SS_BACKGROUND_BLOCK)
548
    return NIL;
549
  else
550
    return iq_query_first(&lev->wait);
551
}
552
 
553
/* The on-line guarantee is enabled only if the appropriate flag is set... */
554
static int SS_public_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth)
555
{
556
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
557
 
558
  #ifdef DEBUG
559
  kern_printf("SS_levguarEDF ");
560
  #endif
561
 
562
  if (*freebandwidth >= lev->U) {
563
    *freebandwidth -= lev->U;
564
    return 1;
565
  }
566
  else
567
    return 0;
568
}
569
 
570
static int SS_public_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth)
571
{
572
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
573
 
574
  #ifdef DEBUG
575
  kern_printf("SS_levguarRM ");
576
  #endif
577
 
578
  if (*freebandwidth > lev->U + RM_MINFREEBANDWIDTH) {
579
    *freebandwidth -= lev->U;
580
    return 1;
581
  }
582
  else
583
    return 0;
584
}
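/* Worked example of the guarantee tests above (illustrative values): the
   server bandwidth is computed in SS_register_level() as
   U = (MAX_BANDWIDTH / per) * Cs, so with Cs = 2000 usec and
   per = 100000 usec the SS reserves roughly 2% of MAX_BANDWIDTH.
   SS_public_guaranteeEDF() accepts the level if at least U free bandwidth
   is left (and subtracts it), while SS_public_guaranteeRM() additionally
   requires RM_MINFREEBANDWIDTH to remain spare. */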
585
 
586
/*-------------------------------------------------------------------*/
587
 
588
/***  Task functions  ***/
589
 
590
 
591
static int SS_public_create(LEVEL l, PID p, TASK_MODEL *m)
592
{
593
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
594
  SOFT_TASK_MODEL *s;
595
 
596
  #ifdef DEBUG
597
  kern_printf("SS_taskcre ");
598
  #endif
599
 
600
  if (m->pclass != SOFT_PCLASS) return -1;
601
  if (m->level != 0 && m->level != l) return -1;
602
  s = (SOFT_TASK_MODEL *)m;
  if (s->periodicity != APERIODIC) return -1;
606
 
607
  if (s->arrivals == SAVE_ARRIVALS)
608
    lev->nact[p] = 0;
609
  else
610
    lev->nact[p] = -1;
611
 
612
  return 0; /* OK, also if the task cannot be guaranteed */
613
}
614
 
615
static void SS_public_dispatch(LEVEL l, PID p, int nostop)
616
{
617
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
618
  struct timespec ty;
619
 
620
  #ifdef DEBUG
621
  kern_printf("SS_tdi ");
622
  #endif
623
 
624
  TIMESPEC_ASSIGN(&ty, &schedule_time);
625
  /* set replenish time */
626
  if(!BACKGROUND_ON) {
627
     if(lev->server_active == SS_SERVER_NOTACTIVE) {
628
        lev->server_active = SS_SERVER_ACTIVE;
629
        ADDUSEC2TIMESPEC(lev->period,&ty);
630
        TIMESPEC_ASSIGN(&lev->lastdline, &ty);
631
        #ifdef DEBUG
632
        kern_printf("tdiPID:%d RT:%d.%d ",p,ty.tv_sec,ty.tv_nsec);
633
        #endif
634
        kern_event_post(&ty, SS_replenish_timer,(void *) l);
635
     }
636
  }
637
 
638
  #ifdef DEBUG
639
  if (nostop) kern_printf("NOSTOP!!! ");
640
  #endif
641
 
642
  /* there is at least one task ready inserted in an RM or similar level.
643
     Note that we can't check the status because the scheduler sets it
644
     to exe before calling task_dispatch.
645
     We have to check lev->activated != p instead */
646
  if (lev->activated != p) {
647
    iq_extract(p, &lev->wait);
648
    #ifdef DEBUG
649
    kern_printf("extr task:%d ",p);
650
    #endif
651
  }
652
  else {
653
    #ifdef DEBUG
654
    if (nostop) kern_printf("(gd status=%d)",proc_table[p].status);
655
    #endif
656
    level_table[lev->scheduling_level]->
657
       private_dispatch(lev->scheduling_level,p,nostop);
658
  }
659
 
660
  /* set capacity timer */
661
  if (!nostop && !BACKGROUND_ON) {
662
    TIMESPEC_ASSIGN(&ty, &schedule_time);
663
//    kern_printf("ty:%d.%d ",ty.tv_sec,ty.tv_nsec);
664
    ADDUSEC2TIMESPEC((lev->availCs<=0 ? 0:lev->availCs),&ty);
665
//    kern_printf("avCs:%d ty:%d.%d ",lev->availCs,ty.tv_sec,ty.tv_nsec);
666
    /* stop the task if budget ends */
667
    #ifdef DEBUG
668
    kern_printf("PID:%d ST=%d.%d  ",p,ty.tv_sec,ty.tv_nsec);
669
    #endif
670
    cap_timer = kern_event_post(&ty, SS_capacity_timer,(void *) l);
671
  }
672
}
673
 
674
static void SS_public_epilogue(LEVEL l, PID p) {
675
 
676
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
677
  struct timespec ty;
678
  int tx;
679
 
680
  #ifdef DEBUG
681
  kern_printf("SS_tep ");
682
  #endif
683
 
684
  /* update the server capacity */
685
  if (BACKGROUND_ON)
686
    lev->flags &= ~SS_BACKGROUND;
687
  else {
688
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
689
//    kern_printf("ty:%d.%d ",ty.tv_sec,ty.tv_nsec);
690
    tx = TIMESPEC2USEC(&ty);
691
    lev->availCs -= tx;
692
//    kern_printf("avCs:%d ty:%d.%d ",lev->availCs,ty.tv_sec,ty.tv_nsec);
693
    lev->replenish_amount += tx;
694
    #ifdef DEBUG
695
    kern_printf("RA:%d ",lev->replenish_amount);
696
    #endif
697
  }
698
 
699
  /* check if the server capacity is finished... */
700
  if (lev->availCs <= 0) {
701
    /* The server slice has finished... do the task_end!!!
       A first version of the module used task_endcycle, but that was
       not conceptually correct, because the task did not stop because it
       had finished all its work, but because the server ran out of budget!
       So, if task_endcycle were called, the task would remain in the
       master level, and we could not wake it up if, for example, another
       task pointed its shadow to it!!! */
708
 
709
    /* set replenish amount */
710
    if(!(BACKGROUND_ON)) {
711
      if(lev->server_active == SS_SERVER_ACTIVE) {
712
        lev->server_active = SS_SERVER_NOTACTIVE;
713
        if(ssq_inslast(l, lev->replenish_amount) == NIL) {
714
          kern_printf("SS: no more space to post replenishment\n");
715
          kern_printf("You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
716
          SS_internal_status(l);
717
          kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
718
          #ifdef DEBUG
719
          exit(-1);
720
          #endif
721
        }
722
        lev->replenish_amount = 0;
723
      }
724
    }
725
 
726
    if (lev->activated == p)
727
      level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p);
728
 
729
    iq_insertfirst(p, &lev->wait);
730
    proc_table[p].status = SS_WAIT;
731
    lev->activated = NIL;
732
  }
733
  else {
734
    /* The task has been preempted.
       It returns to the ready queue or to the
       wait queue by calling the private_epilogue... */
737
 
738
    if (lev->activated == p) {  /* goes into ready queue */
739
      level_table[ lev->scheduling_level ]->
740
        private_epilogue(lev->scheduling_level,p);
741
    }
742
    else {                      /* goes into wait queue */
743
      iq_insertfirst(p, &lev->wait);
744
      proc_table[p].status = SS_WAIT;
745
    }
746
  }
747
}
748
 
749
static void SS_public_activate(LEVEL l, PID p, struct timespec *t)
750
{
751
        SS_level_des *lev = (SS_level_des *)(level_table[l]);
752
 
753
        #ifdef DEBUG
754
        kern_printf("SS_tacti ");
755
        #endif
756
 
757
        if (lev->activated == p || proc_table[p].status == SS_WAIT) {
758
                if (lev->nact[p] != -1) lev->nact[p]++;
759
        }
760
        else if (proc_table[p].status == SLEEP) {
761
                if (lev->activated == NIL && lev->availCs > 0) {
762
                  if(!BACKGROUND_ON) {
763
                    /* if server is active, replenish time already set */
764
                    if (lev->server_active == SS_SERVER_NOTACTIVE) {
765
                      lev->server_active = SS_SERVER_ACTIVE;
766
                      /* set replenish time */
767
                      ADDUSEC2TIMESPEC(lev->period, t);
768
                      TIMESPEC_ASSIGN(&lev->lastdline, t);
769
                      #ifdef DEBUG
770
                      kern_printf("RT=%d.%d ",t->tv_sec,t->tv_nsec);
771
                      #endif
772
                      kern_event_post(t, SS_replenish_timer, (void *) l);
773
                    }
774
                  }
775
                  lev->activated = p;
776
                  SS_activation(lev);
777
                }
778
                else {
779
                        iq_insertlast(p, &lev->wait);
780
                        proc_table[p].status = SS_WAIT;
781
                }
782
        }
783
        else {
784
                kern_printf("SS_REJ%d %d %d %d ",
785
                            p,
786
                            proc_table[p].status,
787
                            lev->activated,
788
                            lev->wait.first);
789
                return;
790
        }
791
}
792
 
793
static void SS_public_unblock(LEVEL l, PID p)
794
{
795
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
796
 
797
  #ifdef DEBUG
798
  kern_printf("SS_tins ");
799
  #endif
800
  lev->flags &= ~SS_BACKGROUND_BLOCK;
801
 
802
  lev->activated = NIL;
803
 
804
  /* when we reinsert the task into the system, the server capacity
     is always 0 because nobody has executed through the SS before... */
806
  iq_insertfirst(p, &lev->wait);
807
  proc_table[p].status = SS_WAIT;
808
}
809
 
810
static void SS_public_block(LEVEL l, PID p)
811
{
812
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
813
 
814
  #ifdef DEBUG
815
  kern_printf("SS_textr ");
816
  #endif
817
 
818
  /* set replenish amount */
819
  if(!(BACKGROUND_ON)) {
820
    SS_set_ra(l);
821
  }  
822
 
823
  /* clear the server capacity */
824
  lev->availCs = 0;
825
 
826
  lev->flags |= SS_BACKGROUND_BLOCK;
827
 
828
  if (lev->activated == p)
829
    level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p);
830
}
831
 
832
static int SS_public_message(LEVEL l, PID p, void *m)
833
{
834
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
835
  struct timespec ty;
836
  int tx;
837
 
838
  #ifdef DEBUG
839
  kern_printf("SS_tendcy ");
840
  #endif
841
 
842
  /* update the server capacity */
843
  if (BACKGROUND_ON)
844
    lev->flags &= ~SS_BACKGROUND;
845
  else {
846
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
847
    tx = TIMESPEC2USEC(&ty);
848
    lev->availCs -= tx;
849
    lev->replenish_amount += tx;
850
    #ifdef DEBUG
851
    kern_printf("PID:%d RA=%d ",lev->replenish_amount);
852
    #endif
853
  }
854
 
855
  if (lev->activated == p)
856
    level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p);
857
  else
858
    iq_extract(p, &lev->wait);
859
 
860
  if (lev->nact[p] > 0) {
861
    lev->nact[p]--;
862
    iq_insertlast(p, &lev->wait);
863
    proc_table[p].status = SS_WAIT;
864
  }
865
  else {
866
    proc_table[p].status = SLEEP;
867
  }
868
 
869
  lev->activated = iq_getfirst(&lev->wait);
870
  if (lev->activated != NIL) {
871
    SS_activation(lev);
872
  }
873
  else {
874
    /* No more tasks to schedule; set the replenish amount */
875
    if(!(BACKGROUND_ON)) {
876
      SS_set_ra(l);
877
    }
878
  }
879
 
880
  jet_update_endcycle(); /* Update the Jet data... */
881
  TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);
882
 
883
  return 0;
884
}
885
 
886
static void SS_public_end(LEVEL l, PID p)
887
{
888
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
889
  struct timespec ty;
890
  int tx;
891
 
892
  #ifdef DEBUG
893
  kern_printf("SS_tend ");
894
  #endif
895
 
896
  /* update the server capacity */
897
  if (BACKGROUND_ON)
898
    lev->flags &= ~SS_BACKGROUND;
899
  else {
900
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
901
    tx = TIMESPEC2USEC(&ty);
902
    lev->availCs -= tx;
903
    lev->replenish_amount += tx;
904
    #ifdef DEBUG
905
    kern_printf("PID:%d RA=%d ",p,lev->replenish_amount);
906
    #endif
907
  }
908
 
909
  if (lev->activated == p)
910
    level_table[lev->scheduling_level]->private_extract(lev->scheduling_level,p);
911
 
912
  proc_table[p].status = FREE;
913
  iq_insertfirst(p,&freedesc);
914
 
915
  lev->activated = iq_getfirst(&lev->wait);
916
  if (lev->activated != NIL) {
917
    SS_activation(lev);
918
  }
919
  else {
920
    if(!(BACKGROUND_ON)){
921
      /* No more tasks to schedule; set the replenish amount */
922
      SS_set_ra(l);
923
    }
924
  }
925
}
926
 
927
/*-------------------------------------------------------------------*/
928
 
929
/*** Registration functions ***/
930
 
931
 
932
/*+ Registration function:
933
    int flags                 the init flags ... see SS.h +*/
934
LEVEL SS_register_level(int flags, LEVEL master, int Cs, int per)
935
{
936
  LEVEL l;            /* the level that we register */
937
  SS_level_des *lev;  /* for readability only */
938
  PID i;              /* a counter */
939
 
940
  /* request an entry in the level_table */
941
  l = level_alloc_descriptor(sizeof(SS_level_des));
942
 
943
  printk("SS_register_level\n");
944
 
945
  lev = (SS_level_des *)level_table[l];
946
 
947
  /* fill the standard descriptor */
948
 
949
  if (flags & SS_ENABLE_BACKGROUND)
950
    lev->l.public_scheduler = SS_public_schedulerbackground;
951
 
952
  if (flags & SS_ENABLE_GUARANTEE_EDF)
953
    lev->l.public_guarantee = SS_public_guaranteeEDF;
954
  else if (flags & SS_ENABLE_GUARANTEE_RM)
955
    lev->l.public_guarantee = SS_public_guaranteeRM;
956
  else
957
    lev->l.public_guarantee = NULL;
958
 
959
  lev->l.public_create    = SS_public_create;
960
  lev->l.public_end       = SS_public_end;
961
  lev->l.public_dispatch  = SS_public_dispatch;
962
  lev->l.public_epilogue  = SS_public_epilogue;
963
  lev->l.public_activate  = SS_public_activate;
964
  lev->l.public_unblock   = SS_public_unblock;
965
  lev->l.public_block     = SS_public_block;
966
  lev->l.public_message   = SS_public_message;
967
 
968
  /* fill the SS descriptor part */
969
 
970
  for (i=0; i<MAX_PROC; i++)
971
     lev->nact[i] = -1;
972
 
973
  lev->Cs = Cs;
974
  lev->availCs = Cs;
975
 
976
  lev->period = per;
977
 
978
  iq_init(&lev->wait, &freedesc, 0);
979
  lev->activated = NIL;
980
 
981
  lev->U = (MAX_BANDWIDTH / per) * Cs;
982
 
983
  lev->scheduling_level = master;
984
 
985
  lev->flags = flags & 0x07;
986
 
987
  /* This is superfluous. I do it for robustness */
988
  for (i=0;i<SS_MAX_REPLENISH;lev->replenishment[i++]=0);
989
 
990
  /* Initialize replenishment stuff */
991
  lev->rfirst=0;
992
  lev->rlast=0;
993
  lev->rcount=0;
994
  lev->replenish_amount=0;
995
  lev->server_active=SS_SERVER_NOTACTIVE;
996
 
997
  return l;
998
}
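/* A minimal registration sketch, disabled like the dynamic-queue code at
   the top of the file: it only illustrates how this function is meant to
   be stacked on a master level.  The master_level argument and the numeric
   budget/period values (in usec) are placeholders, not values taken from
   this module. */
#if 0
static LEVEL SS_register_example(LEVEL master_level)
{
  /* 2000 usec of budget replenished every 100000 usec, with background
     scheduling enabled and an EDF guarantee on the master level */
  return SS_register_level(SS_ENABLE_BACKGROUND | SS_ENABLE_GUARANTEE_EDF,
                           master_level, 2000, 100000);
}
#endif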
999
 
1000
bandwidth_t SS_usedbandwidth(LEVEL l)
1001
{
1002
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
1003
 
1004
  return lev->U;
1005
}
1006
 
1007
int SS_availCs(LEVEL l) {
1008
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
1009
 
1010
  return lev->availCs;
1011
}