Subversion Repositories shark

Rev

Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
2 pj 1
/*
2
 * Project: S.Ha.R.K.
3
 *
4
 * Coordinators:
5
 *   Giorgio Buttazzo    <giorgio@sssup.it>
6
 *   Paolo Gai           <pj@gandalf.sssup.it>
7
 *
8
 * Authors     :
9
 *   Paolo Gai           <pj@gandalf.sssup.it>
10
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
11
 *   Luca Abeni          <luca@gandalf.sssup.it>
12
 *   (see the web pages for full authors list)
13
 *
14
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
15
 *
16
 * http://www.sssup.it
17
 * http://retis.sssup.it
18
 * http://shark.sssup.it
19
 */
20
 
21
/**
22
 ------------
23
 CVS :        $Id: ss.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
24
 
25
 File:        $File$
26
 Revision:    $Revision: 1.1.1.1 $
27
 Last update: $Date: 2002-03-29 14:12:52 $
28
 ------------
29
 
30
 This file contains the aperiodic Sporadic Server (SS).
31
 
32
 Note: in the following, server capacity and server budget are used as
33
       synonyms.
34
 
35
 When scheduling in background  the flags field has the SS_BACKGROUND bit set
36
 
37
 When scheduling a task because it is pointed by another task via shadows,
38
 the task have to be extracted from the wait queue or the master level. To
39
 check this we have to look at the activated field; it is != NIL if a task
40
 is inserted into the master level. Only a task at a time can be inserted
41
 into the master level.
42
 
43
 The capacity of the server must be updated
44
 - when scheduling a task normally
45
 - when scheduling a task because it is pointed by a shadow
46
   but not when scheduling in background.
47
 
48
 When a task is extracted from the system no scheduling has to be done
49
 until the task reenter into the system. To implement this, when a task
50
 is extracted we block the background scheduling (the scheduling with the
51
 master level is already blocked because the activated field is not
52
 reset to NIL) using the SS_BACKGROUNDBLOCK bit.
53
 
54
 nact[p] is -1 if the task set the activations to SKIP, >= 0 otherwise
55
 
56
 In contrast to the classic SS scheme, the activation happens when
 a task issues a create request while there is positive budget (instead
 of becoming active when there is a running task with priority higher
 than or equal to the server's).
 So the replenish time is established on the task arrival time. Replenish
 time is calculated as usual: activation time + server period.
62
 When the server ends its budget, becomes not active until a replenishment
63
 occurs.
64
 
65
 When a task ends its computation and there are no tasks to schedule or,
 again, the server budget ends, a replenish amount is posted so that, when
 the replenish time fires, the server budget will be updated. The replenish
 amount is determined by how much time the tasks have run.
 The replenish amount doesn't take into account periods during which tasks
 handled by the SS are preempted.
71
 
72
 There are two models used to handle a task that is running inside a
 critical section (owning a mutex): the "nostop" model and the "stop" model.
 Using the "nostop" model, a task that runs inside a critical section is not
 stopped when the server exhausts its budget. This is done so that higher
 priority tasks waiting for the mutex are not blocked until the replenish
 time occurs.
77
 When this happens the server capacity becomes negative and the replenish
78
 amount takes into account the negative budget part.
79
 With the "stop" model the running task is always suspended when the server
 budget ends. If the suspended task owns a mutex shared with a higher
 priority task, the latter cannot run until the mutex is released. The
 higher priority task must wait at least up to the next replenish time,
 when the server budget will be refilled and the suspended task runs again.
84
 
85
 Using the "nostop" model, the SS can use more bandwidth than the assigned
 capacity (due to negative budgets). So, when computing the guarantee, the
 longest critical section of all tasks handled by the SS must be considered.
88
 
89
 SS can be used either with EDF or RM master level.
90
 
91
 Read SS.h for further details.
92
 
93
**/
94
 
95
/*
96
 * Copyright (C) 2000 Paolo Gai
97
 *
98
 * This program is free software; you can redistribute it and/or modify
99
 * it under the terms of the GNU General Public License as published by
100
 * the Free Software Foundation; either version 2 of the License, or
101
 * (at your option) any later version.
102
 *
103
 * This program is distributed in the hope that it will be useful,
104
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
105
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
106
 * GNU General Public License for more details.
107
 *
108
 * You should have received a copy of the GNU General Public License
109
 * along with this program; if not, write to the Free Software
110
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
111
 *
112
 */
113
 
114
 
115
#include <stdlib.h>
116
#include <modules/ss.h>
117
#include <ll/stdio.h>
118
#include <ll/string.h>
119
 
120
#include <ll/sys/ll/event.h>
121
 
122
#include <kernel/const.h>
123
#include <kernel/model.h>
124
#include <kernel/model.h>
125
#include <kernel/descr.h>
126
#include <kernel/var.h>
127
#include <kernel/func.h>
128
 
129
/* For debugging purpose */
130
//#define DEBUG 1
131
 
132
/*+ Status used in the level +*/
133
#define SS_WAIT          APER_STATUS_BASE    /*+ waiting the service   +*/
134
 
135
/*+ Some useful macros +*/
136
#define BACKGROUND_ON  (lev->flags & SS_BACKGROUND)
137
 
138
extern struct event *firstevent;
139
 
140
/*+ the level redefinition for the Sporadic Server +*/
141
typedef struct {
142
  level_des l;     /*+ the standard level descriptor          +*/
143
 
144
  /* The wcet are stored in the task descriptor's priority
145
     field, so no other fields are needed                      */
146
 
147
  int nact[MAX_PROC]; /*+ number of pending activations       +*/
148
 
149
  struct timespec lastdline; /*+ the last deeadline assigned to
150
                                 a SS task                    +*/
151
 
152
  int Cs;          /*+ server capacity                        +*/
153
  int availCs;     /*+ server avail time                      +*/
154
  int period;      /*+ Server period +*/
155
 
156
  bandwidth_t U;   /*+ the used bandwidth by the server       +*/
157
 
158
  QQUEUE wait;     /*+ the wait queue of the SS               +*/
159
  PID activated;   /*+ the task inserted in another queue     +*/
160
 
161
  int flags;       /*+ the init flags...                      +*/
162
 
163
 
164
  LEVEL scheduling_level;
165
 
166
  int replenishment[SS_MAX_REPLENISH]; /*+ contains replenish amounts +*/
167
  int rfirst,rlast;                    /*+ first and last valid replenish
168
                                            in replenish queue +*/
169
  int rcount;                           /*+ queued replenishments +*/
170
 
171
  int replenish_amount;            /*+ partial replenishments before post +*/
172
  ss_status server_active;         /*+ Is server active? +*/
173
 
174
} SS_level_des;
175
 
176
/*+ function prototypes +*/
177
void SS_level_status(LEVEL l);
178
static void SS_replenish_timer(void *arg);
179
/*-------------------------------------------------------------------*/
180
 
181
/*** Utility functions ***/
182
 
183
 
184
/* These are for a dynamic replenish queue. **Disabled** */
#if 0
/* These routines are not tested, be careful */

/*+ SS local memory allocator.
    Can be used for performance optimization.
    The interface is the same as kern_alloc() +*/
void inline * ss_alloc(DWORD b) {
        /* Now simply wraps the standard kernel allocator */
        return kern_alloc(b);
}

/*+ Append an element at the tail of the dynamic replenish queue +*/
void ssq_inslast(LEVEL l, replenishq *elem) {

        SS_level_des *lev = (SS_level_des *) level_table[l];

        if (lev->rqueue_last == NULL) { /* empty queue */
                lev->rqueue_last = elem;
                lev->rqueue_first = elem;
                return;
        }
        elem->next = NULL;
        lev->rqueue_last->next = elem;
        lev->rqueue_last = elem;
}

/*+ Extract the first element of the dynamic replenish queue;
    returns 0 when the queue is empty +*/
replenishq *ssq_getfirst(LEVEL l) {

        SS_level_des *lev = (SS_level_des *) level_table[l];
        replenishq *head;

        if (lev->rqueue_first == NULL) { /* empty queue */
                return 0;
        }
        head = lev->rqueue_first;
        lev->rqueue_first = head->next;
        if (lev->rqueue_first == NULL) { /* no more elements */
                lev->rqueue_last = NULL;
        }
        head->next = NULL;      /* remove the dangling pointer */
        return head;
}
#endif
227
 
228
/* For queue implemented with array.
229
   SS_MAX_REPLENISH array size assumed */
230
 
231
/*+ Insert an element at tail of replenish queue
232
        LEVEL l                 module level
233
        int   amount            element to insert
234
 
235
        RETURNS:
236
 
237
          NIL   no more space for insertion +*/
238
static inline int ssq_inslast (LEVEL l, int amount) {
239
 
240
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
241
 
242
  #ifdef DEBUG
243
  kern_printf("insl ");
244
  #endif
245
 
246
  if (lev->rcount == SS_MAX_REPLENISH) {
247
    return NIL; /* no more space in the queue */
248
  }
249
 
250
  lev->replenishment[lev->rlast++] = amount;
251
  lev->rlast %= SS_MAX_REPLENISH;
252
  lev->rcount++;
253
  #ifdef DEBUG
254
  printf_xy(0,0,WHITE,"%d",lev->rcount);
255
  #endif
256
 
257
  return 0;
258
}
259
 
260
/*+ Get first element from replenish queue
261
        LEVEL l         module level
262
 
263
        RETURS:
264
          extracted element
265
          NIL on empty queue +*/
266
static inline int ssq_getfirst (LEVEL l) {
267
 
268
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
269
  int tmp;
270
 
271
  #ifdef DEBUG
272
  kern_printf("getf ");
273
  #endif
274
 
275
  if (lev->rcount == 0) {
276
    return NIL; /* empty queue */
277
  }
278
  tmp = lev->replenishment[lev->rfirst++];
279
  lev->rfirst %= SS_MAX_REPLENISH;
280
  lev->rcount--;
281
  #ifdef DEBUG
282
  printf_xy(0,0,WHITE,"%d",lev->rcount);
283
  #endif
284
  return tmp;
285
}
286
 
287
/*+ Enquire for empty queue
288
        LEVEL l         module level
289
 
290
        RETURS:
291
 
292
          1     queue is empty +*/
293
static inline int ssq_isempty (LEVEL l) {
294
 
295
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
296
 
297
  return !(lev->rcount);
298
 
299
//  if(lev->rcount == 0)
300
//    return 1;
301
//  return 0;
302
}
303
 
304
/*+ Set replenish amount for budget used during task execution
305
        LEVEL l         module level */
306
static inline void SS_set_ra(LEVEL l)
307
{
308
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
309
 
310
  /* replenish must be set when the server is still active */
311
  if(lev->server_active == SS_SERVER_ACTIVE) {
312
    lev->server_active = SS_SERVER_NOTACTIVE;
313
    if(ssq_inslast(l, lev->replenish_amount) == NIL) {
314
      kern_printf("SS: no more space to post replenishment\n");
315
      kern_printf("You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
316
      SS_level_status(l);
317
      kern_raise(XUNVALID_SS_REPLENISH,exec_shadow);
318
      #ifdef DEBUG
319
      sys_abort(-1);
320
      exit(-1);
321
      #endif
322
    }
323
    lev->replenish_amount = 0;
324
  }
325
  else {
326
    kern_printf("SS not active when posting R.A.\n");
327
    SS_level_status(l);
328
    kern_raise(XUNVALID_SS_REPLENISH,exec_shadow);
329
    #ifdef DEBUG
330
    sys_abort(-1);
331
    exit(-1);
332
    #endif
333
  }
334
}
335
/* ------------------------------------------------------------------ */
336
 
337
/* This static function activates the task pointed by lev->activated) */
338
static inline void SS_activation(SS_level_des *lev)
339
{
340
    /* those two defines are for readableness */
341
    PID   p;
342
    LEVEL m;
343
 
344
    JOB_TASK_MODEL j;          /* the guest model */
345
//    struct timespec ty;
346
 
347
    #ifdef DEBUG
348
    kern_printf("SS_acti ");
349
    #endif
350
 
351
    p = lev->activated;
352
    m = lev->scheduling_level;
353
 
354
#if 0
355
    /* if server is active, replenish time already set */
356
    if (lev->server_active == SS_SERVER_NOTACTIVE) {
357
       lev->server_active = SS_SERVER_ACTIVE;
358
       /* set replenish time */
359
       TIMESPEC_ASSIGN(&ty, &proc_table[p].request_time);
360
       ADDUSEC2TIMESPEC(lev->period, &ty);
361
       TIMESPEC_ASSIGN(&lev->lastdline, &ty);
362
       #ifdef DEBUG
363
       kern_printf("RT:%d.%d ",ty.tv_sec,ty.tv_nsec);
364
       #endif
365
       kern_event_post(&ty, SS_replenish_timer, (void *) l);
366
    }
367
#endif
368
 
369
    job_task_default_model(j,lev->lastdline);
370
    job_task_def_period(j,lev->period);
371
    level_table[m]->guest_create(m,p,(TASK_MODEL *)&j);
372
    level_table[m]->guest_activate(m,p);
373
 
374
    #ifdef DEBUG
375
    kern_printf("PID:%p lastdl:%d.%d ",p,lev->lastdline.tv_sec,lev->lastdline.tv_nsec);
376
    #endif
377
}
378
 
379
/*+
380
    Before call capacity_timer, update server capacity
381
    and replenish amount.
382
+*/
383
static void SS_capacity_timer(void *arg) {
384
 
385
        LEVEL l = (LEVEL)arg;
386
        SS_level_des *lev = (SS_level_des *)(level_table[l]);
387
        struct timespec ty;
388
        int tx;
389
 
390
        #ifdef DEBUG
391
        kern_printf("SS_captim ");
392
        #endif
393
 
394
        /* set replenish amount */
395
        /* task was running while budget ends */
396
        lev->server_active = SS_SERVER_NOTACTIVE;
397
        SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
398
        tx = TIMESPEC2USEC(&ty);
399
        lev->availCs -= tx;
400
        if(ssq_inslast(l, tx+lev->replenish_amount) == NIL) {
401
           kern_printf("SS: no more space to post replenishment\n");
402
           kern_printf("    You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
403
           SS_level_status(l);
404
           kern_raise(XUNVALID_SS_REPLENISH,exec_shadow);
405
           #ifdef DEBUG
406
           sys_abort(-1);
407
           exit(-1);
408
           #endif
409
        }
410
        lev->replenish_amount = 0;
411
        capacity_timer(NULL);
412
}
413
 
414
static void SS_replenish_timer(void *arg)
415
{
416
  LEVEL l = (LEVEL)arg;
417
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
418
  struct timespec ty;
419
  int amount;
420
 
421
    #ifdef DEBUG
422
    kern_printf("SS_reptim ");
423
    #endif
424
 
425
  /* availCs may be <0 because a task executed via a shadow for many time
426
     lev->activated == NIL only if the prec task was finished and there
427
     was not any other task to be put in the ready queue
428
     ... we are now activating the next task */
429
  if ((amount = ssq_getfirst(l)) != NIL) {
430
    lev->availCs += amount;
431
    #ifdef DEBUG
432
    kern_printf("AvaCs=%d ",lev->availCs);
433
    #endif
434
    if (lev->availCs > lev->Cs) {
435
      /* This should not be possible. I do so for robustness. */
436
      lev->availCs = lev->Cs;
437
      #ifdef DEBUG
438
      kern_printf("SS warning: budget higher then server capacity. Set to Cs.");
439
      #endif
440
    }
441
    if (lev->availCs <= 0) {
442
      /* we can be here if nostop model is used */
443
      #ifdef DEBUG
444
      kern_printf("WARNING: SS has non positive capacity after replenish.");
445
      #endif
446
      /* if there isn't pending replenishment and server
447
         is not active we must refull somehow.
448
         Otherwise SS remains not active forever */
449
      if(ssq_isempty(l) && lev->server_active == SS_SERVER_NOTACTIVE) {
450
        lev->availCs = lev->Cs;
451
        kern_printf("SS was full replenished due to irreversible non positive budget!!!\n");
452
        kern_printf("You should review your time extimation for critical sections ;)\n");
453
      }
454
    }
455
  }
456
  else {
457
    /* replenish queue is empty */
458
    kern_printf("Replenish Timer fires but no Replenish Amount defined\n");
459
    SS_level_status(l);
460
    kern_raise(XUNVALID_SS_REPLENISH,exec_shadow);
461
    #ifdef DEBUG
462
    sys_abort(-1);
463
    exit(-1);
464
    #endif
465
  }
466
 
467
  if (lev->availCs > 0 && lev->activated == NIL) {
468
    if (qq_queryfirst(&lev->wait) != NIL) {
469
      lev->activated = qq_getfirst(&lev->wait);
470
      /* if server is active, replenish time already set */
471
      if (lev->server_active == SS_SERVER_NOTACTIVE) {
472
         lev->server_active = SS_SERVER_ACTIVE;
473
         /* set replenish time */
474
         ll_gettime(TIME_EXACT, &ty);
475
         ADDUSEC2TIMESPEC(lev->period, &ty);
476
         TIMESPEC_ASSIGN(&lev->lastdline, &ty);
477
         #ifdef DEBUG
478
         kern_printf("RT:%d.%d ",ty.tv_sec,ty.tv_nsec);
479
         #endif
480
         kern_event_post(&ty, SS_replenish_timer, (void *) l);
481
      }
482
      SS_activation(lev);
483
      event_need_reschedule();
484
    }
485
  }
486
}
487
 
488
/* Translate a task status code into a printable string. */
static char *SS_status_to_a(WORD status)
{
  /* statuses below MODULE_STATUS_BASE belong to the generic kernel */
  if (status < MODULE_STATUS_BASE)
    return status_to_a(status);

  if (status == SS_WAIT)
    return "SS_Wait";

  return "SS_Unknown";
}
498
 
499
 
500
/*-------------------------------------------------------------------*/
501
 
502
/*** Level functions ***/
503
 
504
 
505
/* Accept only aperiodic soft tasks bound to this level.
   Returns 0 on acceptance, -1 on refusal. */
static int SS_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
  #ifdef DEBUG
  kern_printf("SS_levacctm cl=%d ",m->pclass);
  #endif

  if (m->pclass == SOFT_PCLASS || m->pclass == (SOFT_PCLASS | l)) {
    SOFT_TASK_MODEL *soft = (SOFT_TASK_MODEL *)m;

    if (soft->periodicity == APERIODIC) {
      #ifdef DEBUG
      kern_printf("AcceptApe ");
      #endif
      return 0;
    }
    /* soft but not aperiodic: refused */
    #ifdef DEBUG
    kern_printf("NAcceptApe ");
    #endif
  }
  #ifdef DEBUG
  kern_printf("NAccept ");
  #endif
  return -1;
}
529
 
530
/* The SS never hosts guest tasks: always refuse. */
static int SS_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
  return -1;
}
535
 
536
/* Dump the server state (guarantee mode, bandwidth, capacity, queued
   tasks) to the console; used for diagnostics and by the error paths. */
void SS_level_status(LEVEL l)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  PID p = qq_queryfirst(&lev->wait);

  kern_printf("On-line guarantee : %s\n",
    (lev->flags & SS_ENABLE_GUARANTEE_EDF ||
     lev->flags & SS_ENABLE_GUARANTEE_RM  )?"On":"Off");

  kern_printf("Used Bandwidth    : %u/%u\n",lev->U,MAX_BANDWIDTH);
  kern_printf("Period            : %d\n",lev->period);
  kern_printf("Capacity          : %d\n",lev->Cs);
  kern_printf("Avail capacity    : %d\n",lev->availCs);
  kern_printf("Server is %sactive\n",
     (lev->server_active == SS_SERVER_NOTACTIVE ? "not ":""));
  kern_printf("Pending RAs       : %d\n",lev->rcount);

  /* the task currently inserted into the master level, if any */
  if (lev->activated != NIL)
    kern_printf("Activated: Pid: %d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n",
                lev->activated,
                proc_table[lev->activated].name,
                proc_table[lev->activated].timespec_priority.tv_sec,
                proc_table[lev->activated].timespec_priority.tv_nsec,
                lev->nact[lev->activated],
                SS_status_to_a(proc_table[lev->activated].status));

  /* walk the wait queue */
  for (; p != NIL; p = proc_table[p].next)
    kern_printf("Pid: %d\tName: %10s\tStatus: %s\n",
                p,
                proc_table[p].name,
                SS_status_to_a(proc_table[p].status));
}
570
 
571
/* Foreground scheduler hook: always returns NIL. */
static PID SS_level_scheduler(LEVEL l)
{
  #ifdef DEBUG
  kern_printf("SS_levsch ");
  #endif

  /* the SS never schedules by itself: the master level
     (RM, EDF or similar) does it */
  return NIL;
}
581
 
582
/* Background scheduler hook: the SS uses background time to execute
   aperiodic activities. */
static PID SS_level_schedulerbackground(LEVEL l)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  #ifdef DEBUG
  kern_printf("SS_levschbg ");
  #endif

  lev->flags |= SS_BACKGROUND;

  /* background scheduling may be temporarily blocked when a task
     has been extracted from the system */
  return (lev->flags & SS_BACKGROUND_BLOCK) ? NIL : qq_queryfirst(&lev->wait);
}
598
 
599
/* The on-line guarantee is enabled only if the appropriate flag is set... */
600
static int SS_level_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth)
601
{
602
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
603
 
604
  #ifdef DEBUG
605
  kern_printf("SS_levguarEDF ");
606
  #endif
607
 
608
  if (*freebandwidth >= lev->U) {
609
    *freebandwidth -= lev->U;
610
    return 1;
611
  }
612
  else
613
    return 0;
614
}
615
 
616
/* RM variant of the guarantee: a minimum slack of RM_MINFREEBANDWIDTH
   must remain free beyond the server bandwidth. */
static int SS_level_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  #ifdef DEBUG
  kern_printf("SS_levguarRM ");
  #endif

  if (*freebandwidth <= lev->U + RM_MINFREEBANDWIDTH)
    return 0;

  *freebandwidth -= lev->U;
  return 1;
}
631
 
632
/*-------------------------------------------------------------------*/
633
 
634
/***  Task functions  ***/
635
 
636
 
637
/* Initialize the per-task state: the pending-activation counter.
   nact == -1 means the task asked to skip saved activations. */
static int SS_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  /* if SS_task_create is called the model has already been accepted,
     so the cast is safe */
  SOFT_TASK_MODEL *s = (SOFT_TASK_MODEL *)m;

  #ifdef DEBUG
  kern_printf("SS_taskcre ");
  #endif

  lev->nact[p] = (s->arrivals == SAVE_ARRIVALS) ? 0 : -1;

  return 0; /* OK, also if the task cannot be guaranteed */
}
655
 
656
/* Nothing to clean up: the SS level does not allocate
   any dynamic per-task state. */
static void SS_task_detach(LEVEL l, PID p)
{
}
661
 
662
/* A chosen task is always eligible. */
static int SS_task_eligible(LEVEL l, PID p)
{
  return 0;
}
666
 
667
/* Dispatch hook: set the replenish time on first activation of the
   server, hand the task to the master level (or pull it from the wait
   queue), and arm the capacity timer. */
static void SS_task_dispatch(LEVEL l, PID p, int nostop)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec when;

  #ifdef DEBUG
  kern_printf("SS_tdi ");
  #endif

  /* set the replenish time (schedule_time + period) the first time the
     server becomes active; background execution consumes no budget */
  TIMESPEC_ASSIGN(&when, &schedule_time);
  if (!BACKGROUND_ON && lev->server_active == SS_SERVER_NOTACTIVE) {
    lev->server_active = SS_SERVER_ACTIVE;
    ADDUSEC2TIMESPEC(lev->period,&when);
    TIMESPEC_ASSIGN(&lev->lastdline, &when);
    #ifdef DEBUG
    kern_printf("tdiPID:%d RT:%d.%d ",p,when.tv_sec,when.tv_nsec);
    #endif
    kern_event_post(&when, SS_replenish_timer,(void *) l);
  }

  #ifdef DEBUG
  if (nostop) kern_printf("NOSTOP!!! ");
  #endif

  /* there is at least one task ready inserted in an RM or similar level.
     The status cannot be tested (the scheduler already set it to exe
     before calling task_dispatch), so check lev->activated != p instead. */
  if (lev->activated != p) {
    qq_extract(p, &lev->wait);
    #ifdef DEBUG
    kern_printf("extr task:%d ",p);
    #endif
  }
  else {
    #ifdef DEBUG
    if (nostop) kern_printf("(gd status=%d)",proc_table[p].status);
    #endif
    level_table[lev->scheduling_level]->
       guest_dispatch(lev->scheduling_level,p,nostop);
  }

  /* arm the capacity timer so the task is stopped when the budget ends */
  if (!nostop && !BACKGROUND_ON) {
    TIMESPEC_ASSIGN(&when, &schedule_time);
    ADDUSEC2TIMESPEC((lev->availCs<=0 ? 0:lev->availCs),&when);
    #ifdef DEBUG
    kern_printf("PID:%d ST=%d.%d  ",p,when.tv_sec,when.tv_nsec);
    #endif
    cap_timer = kern_event_post(&when, SS_capacity_timer,(void *) l);
  }
}
725
 
726
/* Epilogue hook: account the consumed budget; if it is exhausted the
   task is pulled out of the master level and parked in the wait queue,
   otherwise the preemption is propagated via guest_epilogue. */
static void SS_task_epilogue(LEVEL l, PID p) {

  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec elapsed;
  int used;

  #ifdef DEBUG
  kern_printf("SS_tep ");
  #endif

  /* update the server capacity with the time used since last dispatch */
  if (BACKGROUND_ON)
    lev->flags &= ~SS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &elapsed);
    used = TIMESPEC2USEC(&elapsed);
    lev->availCs -= used;
    lev->replenish_amount += used;
    #ifdef DEBUG
    kern_printf("RA:%d ",lev->replenish_amount);
    #endif
  }

  if (lev->availCs <= 0) {
    /* The server slice has finished... do the task_end!!!
       A first version of the module used the task_endcycle, but it was
       not conceptually correct because the task didn't stop because it
       finished all the work, but because the server didn't have budget!
       So, if the task_endcycle is called, the task remain into the
       master level, and we can't wake him up if, for example, another
       task point the shadow to it!!! */

    /* budget exhausted: post the replenish amount */
    if (!(BACKGROUND_ON)) {
      if (lev->server_active == SS_SERVER_ACTIVE) {
        lev->server_active = SS_SERVER_NOTACTIVE;
        if (ssq_inslast(l, lev->replenish_amount) == NIL) {
          kern_printf("SS: no more space to post replenishment\n");
          kern_printf("You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
          SS_level_status(l);
          kern_raise(XUNVALID_SS_REPLENISH,exec_shadow);
          #ifdef DEBUG
          sys_abort(-1);
          exit(-1);
          #endif
        }
        lev->replenish_amount = 0;
      }
    }

    if (lev->activated == p)
      level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);

    qq_insertfirst(p, &lev->wait);
    proc_table[p].status = SS_WAIT;
    lev->activated = NIL;
  }
  else {
    /* The task was preempted: it goes back into the master level's
       ready queue (via guest_epilogue) or into the wait queue. */
    if (lev->activated == p)
      level_table[ lev->scheduling_level ]->
        guest_epilogue(lev->scheduling_level,p);
    else {
      qq_insertfirst(p, &lev->wait);
      proc_table[p].status = SS_WAIT;
    }
  }
}
801
 
802
/* Activation hook: an arriving task is either counted as a pending
   activation, started immediately (budget permitting), or queued. */
static void SS_task_activate(LEVEL l, PID p)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec ty;

  #ifdef DEBUG
  kern_printf("SS_tacti ");
  #endif

  if (lev->activated == p || proc_table[p].status == SS_WAIT) {
    /* already in the system: save the activation unless nact == -1 */
    if (lev->nact[p] != -1)
      lev->nact[p]++;
  }
  else if (proc_table[p].status == SLEEP) {
    ll_gettime(TIME_EXACT, &proc_table[p].request_time);

    if (lev->activated == NIL && lev->availCs > 0) {
      if (!BACKGROUND_ON) {
        /* if the server is active the replenish time is already set */
        if (lev->server_active == SS_SERVER_NOTACTIVE) {
          lev->server_active = SS_SERVER_ACTIVE;
          /* replenish time = arrival time + server period */
          TIMESPEC_ASSIGN(&ty, &proc_table[p].request_time);
          ADDUSEC2TIMESPEC(lev->period, &ty);
          TIMESPEC_ASSIGN(&lev->lastdline, &ty);
          #ifdef DEBUG
          kern_printf("RT=%d.%d ",ty.tv_sec,ty.tv_nsec);
          #endif
          kern_event_post(&ty, SS_replenish_timer, (void *) l);
        }
      }
      lev->activated = p;
      SS_activation(lev);
    }
    else {
      /* no budget or another task already activated: wait */
      qq_insertlast(p, &lev->wait);
      proc_table[p].status = SS_WAIT;
    }
  }
  else {
    /* unexpected task state: reject the activation */
    kern_printf("SS_REJ%d %d %d %d ",
                p,
                proc_table[p].status,
                lev->activated,
                lev->wait.first);
    return;
  }
}
849
 
850
/* Reinsertion hook: the task reenters the system after an extraction. */
static void SS_task_insert(LEVEL l, PID p)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  #ifdef DEBUG
  kern_printf("SS_tins ");
  #endif

  /* the task is back: background scheduling may resume */
  lev->flags &= ~SS_BACKGROUND_BLOCK;

  lev->activated = NIL;

  /* when we reinsert the task into the system, the server capacity
     is always 0 because nobody executes with the SS before... */
  qq_insertfirst(p, &lev->wait);
  proc_table[p].status = SS_WAIT;
}
866
 
867
/* Extraction hook: the task leaves the system (e.g. via a shadow);
   block all SS scheduling until it is reinserted. */
static void SS_task_extract(LEVEL l, PID p)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  #ifdef DEBUG
  kern_printf("SS_textr ");
  #endif

  /* post the replenish amount accumulated so far */
  if (!(BACKGROUND_ON))
    SS_set_ra(l);

  /* clear the server capacity */
  lev->availCs = 0;

  /* no background scheduling until the task reenters the system */
  lev->flags |= SS_BACKGROUND_BLOCK;

  if (lev->activated == p)
    level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);
}
888
 
889
/* End-of-cycle hook: account the used budget, remove the task from the
   master level or the wait queue, honor saved activations, and start
   the next waiting task (or post the replenish amount). */
static void SS_task_endcycle(LEVEL l, PID p)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec ty;
  int tx;

  #ifdef DEBUG
  kern_printf("SS_tendcy ");
  #endif

  /* update the server capacity */
  if (BACKGROUND_ON)
    lev->flags &= ~SS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
    lev->replenish_amount += tx;
    #ifdef DEBUG
    /* BUGFIX: the original passed a single argument for two conversion
       specifiers ("PID:%d RA=%d" with only replenish_amount) — undefined
       behavior; `p' was missing (cf. the same print in SS_task_end). */
    kern_printf("PID:%d RA=%d ",p,lev->replenish_amount);
    #endif
  }

  if (lev->activated == p)
    level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);
  else
    qq_extract(p, &lev->wait);

  /* a saved activation immediately requeues the task; otherwise sleep */
  if (lev->nact[p] > 0) {
    lev->nact[p]--;
    qq_insertlast(p, &lev->wait);
    proc_table[p].status = SS_WAIT;
  }
  else {
    proc_table[p].status = SLEEP;
  }

  lev->activated = qq_getfirst(&lev->wait);
  if (lev->activated != NIL) {
    SS_activation(lev);
  }
  else {
    /* no more tasks to schedule: post the replenish amount */
    if (!(BACKGROUND_ON)) {
      SS_set_ra(l);
    }
  }
}
937
 
938
/* Task termination handler: charges the consumed capacity, detaches the
   task from the master level, frees its descriptor and activates the
   next waiting task (or posts the replenishment if none is left). */
static void SS_task_end(LEVEL l, PID p)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec elapsed;
  int consumed;

  #ifdef DEBUG
  kern_printf("SS_tend ");
  #endif

  /* Charge the time consumed since the last capacity checkpoint,
     unless the task ran in background. */
  if (BACKGROUND_ON)
    lev->flags &= ~SS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &elapsed);
    consumed = TIMESPEC2USEC(&elapsed);
    lev->availCs -= consumed;
    lev->replenish_amount += consumed;
    #ifdef DEBUG
    kern_printf("PID:%d RA=%d ",p,lev->replenish_amount);
    #endif
  }

  /* If the task was hosted by the master level, remove it from there. */
  if (lev->activated == p)
    level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);

  /* Release the process descriptor. */
  proc_table[p].status = FREE;
  q_insertfirst(p,&freedesc);

  /* Promote the first waiting task; with nothing left to schedule,
     post the replenishment (unless in background). */
  lev->activated = qq_getfirst(&lev->wait);
  if (lev->activated != NIL)
    SS_activation(lev);
  else if (!(BACKGROUND_ON))
    SS_set_ra(l);
}
978
 
979
/* Sleep handler: charges the consumed capacity, discards any pending
   activations, puts the task to sleep and activates the next waiting
   task (or posts the replenishment if none is left). */
static void SS_task_sleep(LEVEL l, PID p)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec elapsed;
  int consumed;

  #ifdef DEBUG
  kern_printf("SS_tasksle ");
  #endif

  /* Charge the time consumed since the last capacity checkpoint,
     unless the task ran in background. */
  if (BACKGROUND_ON)
    lev->flags &= ~SS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &elapsed);
    consumed = TIMESPEC2USEC(&elapsed);
    lev->availCs -= consumed;
    lev->replenish_amount += consumed;
    #ifdef DEBUG
    kern_printf("PID:%d RA=%d ",p,lev->replenish_amount);
    #endif
  }

  /* Sleeping discards every pending activation. */
  lev->nact[p] = 0;

  /* The task is either hosted in the master level or parked in the
     wait queue; take it out of wherever it is. */
  if (lev->activated == p)
    level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);
  else
    qq_extract(p, &lev->wait);

  proc_table[p].status = SLEEP;

  /* Promote the first waiting task; with nothing left to schedule,
     post the replenishment (unless in background). */
  lev->activated = qq_getfirst(&lev->wait);
  if (lev->activated != NIL)
    SS_activation(lev);
  else if (!(BACKGROUND_ON))
    SS_set_ra(l);
}
1022
 
1023
/* Delay handler: charges the consumed capacity, posts the replenishment
   immediately (the delay may outlast the replenishment instant), then
   forwards the delay to the master level if the task is hosted there. */
static void SS_task_delay(LEVEL l, PID p, TIME usdelay)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);
  struct timespec elapsed;
  int consumed;

  #ifdef DEBUG
  kern_printf("SS_tdelay ");
  #endif

  /* Charge the time consumed since the last capacity checkpoint,
     unless the task ran in background. */
  if (BACKGROUND_ON)
    lev->flags &= ~SS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &elapsed);
    consumed = TIMESPEC2USEC(&elapsed);
    lev->availCs -= consumed;
    lev->replenish_amount += consumed;
    #ifdef DEBUG
    kern_printf("PID:%d RA=%d ",p,lev->replenish_amount);
    #endif

    /* Here set replenish amount because delay may be too long and
       replenish time could arrive */
    SS_set_ra(l);
  }

  /* I hope no delay when owning a mutex... */
  if (lev->activated == p)
    level_table[lev->scheduling_level]->guest_delay(lev->scheduling_level,
                                                    p, usdelay);
}
1056
 
1057
 
1058
/*-------------------------------------------------------------------*/
1059
 
1060
/***  Guest functions  ***/
1061
 
1062
 
1063
/* SS doesn't handles guest tasks */
1064
 
1065
/* SS does not host guest tasks: any guest call is a kernel error. */
static int SS_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{
  kern_raise(XUNVALID_GUEST, exec_shadow);
  return 0;
}
1067
 
1068
/* SS does not host guest tasks: any guest call is a kernel error. */
static void SS_guest_detach(LEVEL l, PID p)
{
  kern_raise(XUNVALID_GUEST, exec_shadow);
}
1070
 
1071
/* SS does not host guest tasks: any guest call is a kernel error. */
static void SS_guest_dispatch(LEVEL l, PID p, int nostop)
{
  kern_raise(XUNVALID_GUEST, exec_shadow);
}
1073
 
1074
/* SS does not host guest tasks: any guest call is a kernel error. */
static void SS_guest_epilogue(LEVEL l, PID p)
{
  kern_raise(XUNVALID_GUEST, exec_shadow);
}
1076
 
1077
/* SS does not host guest tasks: any guest call is a kernel error. */
static void SS_guest_activate(LEVEL l, PID p)
{
  kern_raise(XUNVALID_GUEST, exec_shadow);
}
1079
 
1080
/* SS does not host guest tasks: any guest call is a kernel error. */
static void SS_guest_insert(LEVEL l, PID p)
{
  kern_raise(XUNVALID_GUEST, exec_shadow);
}
1082
 
1083
/* SS does not host guest tasks: any guest call is a kernel error. */
static void SS_guest_extract(LEVEL l, PID p)
{
  kern_raise(XUNVALID_GUEST, exec_shadow);
}
1085
 
1086
/* SS does not host guest tasks: any guest call is a kernel error. */
static void SS_guest_endcycle(LEVEL l, PID p)
{
  kern_raise(XUNVALID_GUEST, exec_shadow);
}
1088
 
1089
/* SS does not host guest tasks: any guest call is a kernel error. */
static void SS_guest_end(LEVEL l, PID p)
{
  kern_raise(XUNVALID_GUEST, exec_shadow);
}
1091
 
1092
/* SS does not host guest tasks: any guest call is a kernel error. */
static void SS_guest_sleep(LEVEL l, PID p)
{
  kern_raise(XUNVALID_GUEST, exec_shadow);
}
1094
 
1095
/* SS does not host guest tasks: any guest call is a kernel error. */
static void SS_guest_delay(LEVEL l, PID p, DWORD tickdelay)
{
  kern_raise(XUNVALID_GUEST, exec_shadow);
}
1097
 
1098
 
1099
/*-------------------------------------------------------------------*/
1100
 
1101
/*** Registration functions ***/
1102
 
1103
 
1104
/*+ Registration function:
1105
    int flags                 the init flags ... see SS.h +*/
1106
void SS_register_level(int flags, LEVEL master, int Cs, int per)
1107
{
1108
  LEVEL l;            /* the level that we register */
1109
  SS_level_des *lev;  /* for readableness only */
1110
  PID i;              /* a counter */
1111
 
1112
  /* request an entry in the level_table */
1113
  l = level_alloc_descriptor();
1114
  #ifdef DEBUG
1115
  kern_printf("Alloc des %d ",l);
1116
  #endif
1117
 
1118
  /* alloc the space needed for the SS_level_des */
1119
  lev = (SS_level_des *)kern_alloc(sizeof(SS_level_des));
1120
 
1121
  /* update the level_table with the new entry */
1122
  level_table[l] = (level_des *)lev;
1123
 
1124
  /* fill the standard descriptor */
1125
  strncpy(lev->l.level_name,  SS_LEVELNAME, MAX_LEVELNAME);
1126
  lev->l.level_code               = SS_LEVEL_CODE;
1127
  lev->l.level_version            = SS_LEVEL_VERSION;
1128
 
1129
  lev->l.level_accept_task_model  = SS_level_accept_task_model;
1130
  lev->l.level_accept_guest_model = SS_level_accept_guest_model;
1131
  lev->l.level_status             = SS_level_status;
1132
 
1133
  if (flags & SS_ENABLE_BACKGROUND)
1134
    lev->l.level_scheduler          = SS_level_schedulerbackground;
1135
  else
1136
    lev->l.level_scheduler          = SS_level_scheduler;
1137
 
1138
  if (flags & SS_ENABLE_GUARANTEE_EDF)
1139
    lev->l.level_guarantee        = SS_level_guaranteeEDF;
1140
  else if (flags & SS_ENABLE_GUARANTEE_RM)
1141
    lev->l.level_guarantee        = SS_level_guaranteeRM;
1142
  else
1143
    lev->l.level_guarantee        = NULL;
1144
 
1145
  lev->l.task_create              = SS_task_create;
1146
  lev->l.task_detach              = SS_task_detach;
1147
  lev->l.task_eligible            = SS_task_eligible;
1148
  lev->l.task_dispatch            = SS_task_dispatch;
1149
  lev->l.task_epilogue            = SS_task_epilogue;
1150
  lev->l.task_activate            = SS_task_activate;
1151
  lev->l.task_insert              = SS_task_insert;
1152
  lev->l.task_extract             = SS_task_extract;
1153
  lev->l.task_endcycle            = SS_task_endcycle;
1154
  lev->l.task_end                 = SS_task_end;
1155
  lev->l.task_sleep               = SS_task_sleep;
1156
  lev->l.task_delay               = SS_task_delay;
1157
 
1158
  lev->l.guest_create             = SS_guest_create;
1159
  lev->l.guest_detach             = SS_guest_detach;
1160
  lev->l.guest_dispatch           = SS_guest_dispatch;
1161
  lev->l.guest_epilogue           = SS_guest_epilogue;
1162
  lev->l.guest_activate           = SS_guest_activate;
1163
  lev->l.guest_insert             = SS_guest_insert;
1164
  lev->l.guest_extract            = SS_guest_extract;
1165
  lev->l.guest_endcycle           = SS_guest_endcycle;
1166
  lev->l.guest_end                = SS_guest_end;
1167
  lev->l.guest_sleep              = SS_guest_sleep;
1168
  lev->l.guest_delay              = SS_guest_delay;
1169
 
1170
  /* fill the SS descriptor part */
1171
 
1172
  for (i=0; i<MAX_PROC; i++)
1173
     lev->nact[i] = -1;
1174
 
1175
  lev->Cs = Cs;
1176
  lev->availCs = Cs;
1177
 
1178
  lev->period = per;
1179
 
1180
  qq_init(&lev->wait);
1181
  lev->activated = NIL;
1182
 
1183
  lev->U = (MAX_BANDWIDTH / per) * Cs;
1184
 
1185
  lev->scheduling_level = master;
1186
 
1187
  lev->flags = flags & 0x07;
1188
 
1189
  /* This is superfluos. I do it for robustness */
1190
  for (i=0;i<SS_MAX_REPLENISH;lev->replenishment[i++]=0);
1191
 
1192
  /* Initialize replenishment stuff */
1193
  lev->rfirst=0;
1194
  lev->rlast=0;
1195
  lev->rcount=0;
1196
  lev->replenish_amount=0;
1197
  lev->server_active=SS_SERVER_NOTACTIVE;
1198
}
1199
 
1200
/* Returns the bandwidth reserved by the SS level l, or 0 when l is not
   a Sporadic Server level (code/version mismatch). */
bandwidth_t SS_usedbandwidth(LEVEL l)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  /* Guard: the descriptor must really belong to an SS level. */
  if (lev->l.level_code    != SS_LEVEL_CODE ||
      lev->l.level_version != SS_LEVEL_VERSION)
    return 0;

  return lev->U;
}
1209
 
1210
/* Returns the currently available server capacity of the SS level l,
   or 0 when l is not a Sporadic Server level (code/version mismatch). */
int SS_availCs(LEVEL l)
{
  SS_level_des *lev = (SS_level_des *)(level_table[l]);

  /* Guard: the descriptor must really belong to an SS level. */
  if (lev->l.level_code    != SS_LEVEL_CODE ||
      lev->l.level_version != SS_LEVEL_VERSION)
    return 0;

  return lev->availCs;
}