/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors     :
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
 *   Luca Abeni          <luca@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: ps.c,v 1.1 2005-02-25 10:40:58 pj Exp $

 File:        $File$
 Revision:    $Revision: 1.1 $
 Last update: $Date: 2005-02-25 10:40:58 $
 ------------

 This file contains the aperiodic server PS (Polling Server).

 When scheduling in background, the flags field has the PS_BACKGROUND bit set.

 When a task is scheduled because it is pointed to by another task via
 shadows, it has to be extracted either from the wait queue or from the
 master level. To check which one, we look at the activated field: it is
 != NIL if the task is inserted into the master level. Only one task at a
 time can be inserted into the master level.

 The capacity of the server must be updated
 - when scheduling a task normally
 - when scheduling a task because it is pointed to by a shadow
 but not when scheduling in background.

 When a task is extracted from the system, no scheduling has to be done
 until the task reenters the system. To implement this, when a task is
 extracted we block the background scheduling (scheduling through the
 master level is already blocked because the activated field is not reset
 to NIL) using the PS_BACKGROUND_BLOCK bit.

 nact[p] is -1 if the task set its activations to SKIP, >= 0 otherwise.

 Note that if the period event fires and there is no task to schedule,
 the server capacity is set to 0. This is correct, but there is a subtle
 variant: the capacity could instead be zeroed later, the first time the
 server becomes the highest priority running entity and there is no task
 to serve (this matters when, at the end of the period, the running task
 has a higher priority than the server). The second implementation is
 more efficient but more complicated, because normally we don't know the
 priority of the running task.

 It could be implemented this way: when there is no task to schedule, the
 lev->activated field is set not to NIL but to a "dummy" task that is
 inserted into the master level queue. When the master level scheduler
 tries to schedule the "dummy" task (i.e., the situation in which there
 is no task to schedule and the PS has the highest priority), it calls
 PS_task_eligible, which sets the server capacity to 0, removes the dummy
 task from the queue with a guest_end, and asks for a reschedule.

 Because this implementation is more complex than the first, it is not
 implemented here... see (*), near line 169, 497 and 524.


 Read PS.h for further details.

**/
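
/*
 * Worked example of the rules above (illustrative numbers only, not taken
 * from any real configuration): with a budget Cs = 2000 us, a task served
 * normally, or served because a shadow points to it, that runs for 500 us
 * leaves availCs = 1500, whereas a task executed purely in background
 * consumes no budget, because the PS_BACKGROUND bit is set by the
 * scheduler and the epilogue/end hooks then skip the availCs update.
 * While a task is out of the system, the PS_BACKGROUND_BLOCK bit keeps
 * even that background path disabled.
 */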

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */


#include <ps/ps/ps.h>
#include <ll/stdio.h>
#include <ll/string.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>

#include <tracer.h>

/*+ Status used in the level +*/
#define PS_WAIT          APER_STATUS_BASE    /*+ waiting the service   +*/

/*+ the level redefinition for the Polling Server level +*/
typedef struct {
  level_des l;     /*+ the standard level descriptor          +*/

  /* The wcet are stored in the task descriptor's priority
     field, so no other fields are needed                      */

  int nact[MAX_PROC]; /*+ number of pending activations       +*/

  struct timespec lastdline; /*+ the last deadline assigned to
                                 a PS task                    +*/

  int Cs;          /*+ server capacity                        +*/
  int availCs;     /*+ the available server capacity          +*/

  IQUEUE wait;      /*+ the wait queue of the PS              +*/
  PID activated;   /*+ the task inserted in the master level  +*/

  int flags;       /*+ the init flags...                      +*/

  bandwidth_t U;   /*+ the bandwidth used by the server       +*/
  int period;

  LEVEL scheduling_level;

} PS_level_des;

/* This static function activates the task pointed to by lev->activated */
static __inline__ void PS_activation(PS_level_des *lev)
{
    PID p;                     /* for readability     */
    JOB_TASK_MODEL j;          /* the guest model     */
    LEVEL m;                   /* the master level... only for readability */

    p = lev->activated;
    m = lev->scheduling_level;
    job_task_default_model(j,lev->lastdline);
    job_task_def_period(j,lev->period);
    level_table[m]->private_insert(m,p,(TASK_MODEL *)&j);
//    kern_printf("(%d %d)",lev->lastdline.tv_sec,lev->lastdline.tv_nsec);
}

static void PS_deadline_timer(void *a)
{
  PS_level_des *lev = (PS_level_des *)(level_table[(LEVEL)a]);

  ADDUSEC2TIMESPEC(lev->period, &lev->lastdline);

//  kern_printf("(%d:%d %d)",lev->lastdline.tv_sec,lev->lastdline.tv_nsec, lev->period);
  if (lev->availCs >= 0)
    lev->availCs = lev->Cs;
  else
    lev->availCs += lev->Cs;

  /* availCs may be < 0 because a task executed via a shadow for too long.
     lev->activated == NIL only if the previous task has finished and there
     was no other task to be put in the ready queue
     ... we are now activating the next task */
  if (lev->availCs > 0 && lev->activated == NIL) {
      if (iq_query_first(&lev->wait) != NIL) {
        lev->activated = iq_getfirst(&lev->wait);
        PS_activation(lev);
        event_need_reschedule();
      }
      else
        lev->availCs = 0; /* see note (*) at the begin of the file */
  }

  kern_event_post(&lev->lastdline, PS_deadline_timer, a);
//  kern_printf("!");
}
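
/*
 * Numeric sketch of the replenishment rule above (values are purely
 * illustrative): with Cs = 2000, an availCs of 300 at the period boundary
 * is reset to the full 2000, while an availCs of -300 (a shadowed task
 * overran the budget) only becomes -300 + 2000 = 1700, so the overrun is
 * paid back out of the next period's budget.
 */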

static PID PS_public_schedulerbackground(LEVEL l)
{
  /* the PS catches the background time to execute aperiodic activities */
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  lev->flags |= PS_BACKGROUND;

  if (lev->flags & PS_BACKGROUND_BLOCK)
    return NIL;
  else
    return iq_query_first(&lev->wait);
}

/* The on-line guarantee is enabled only if the appropriate flag is set... */
static int PS_public_guaranteeEDF(LEVEL l, bandwidth_t *freebandwidth)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  if (*freebandwidth >= lev->U) {
    *freebandwidth -= lev->U;
    return 1;
  }
  else
    return 0;
}

static int PS_public_guaranteeRM(LEVEL l, bandwidth_t *freebandwidth)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  if (*freebandwidth > lev->U + RM_MINFREEBANDWIDTH) {
    *freebandwidth -= lev->U;
    return 1;
  }
  else
    return 0;
}
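
/*
 * Worked example of the guarantee test (illustrative only, assuming
 * MAX_BANDWIDTH represents 100% CPU utilization): a server registered
 * with Cs = 10000 us and per = 100000 us gets
 *     U = (MAX_BANDWIDTH / 100000) * 10000 ~= 0.10 * MAX_BANDWIDTH,
 * so the EDF guarantee above succeeds only if at least 10% of the
 * bandwidth is still free, and in that case the free bandwidth is
 * decreased by that amount.
 */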

static int PS_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);
  SOFT_TASK_MODEL *s;

  if (m->pclass != SOFT_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;
  s = (SOFT_TASK_MODEL *)m;
  if (s->periodicity != APERIODIC) return -1;

  if (s->arrivals == SAVE_ARRIVALS)
    lev->nact[p] = 0;
  else
    lev->nact[p] = -1;

  return 0; /* OK, also if the task cannot be guaranteed... */
}
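
/*
 * Pending-activation bookkeeping in a nutshell (illustrative trace): a task
 * created with arrivals == SAVE_ARRIVALS starts with nact[p] = 0; each new
 * activation that arrives while it is already active or waiting increments
 * nact[p] (see PS_public_activate), and each end-of-cycle message consumes
 * one saved activation by re-inserting the task into the wait queue (see
 * PS_public_message). With any other arrivals policy, nact[p] stays at -1
 * and extra activations are simply skipped.
 */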

static void PS_public_dispatch(LEVEL l, PID p, int nostop)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);
  struct timespec ty;

//  if (nostop) kern_printf("NOSTOP!!!!!!!!!!!!");
  /* The task is either the one inserted in the master (EDF or similar)
     level or a task picked from the wait queue in background. Note that
     we can't check the status because the scheduler sets it to exe before
     calling task_dispatch; we have to check lev->activated != p instead */
  if (lev->activated != p) {
    iq_extract(p, &lev->wait);
    //kern_printf("#%d#",p);
  }
  else {
    //if (nostop) kern_printf("(gd status=%d)",proc_table[p].status);
    level_table[ lev->scheduling_level ]->
      private_dispatch(lev->scheduling_level,p,nostop);
  }

  /* set the capacity timer */
  if (!nostop) {
    TIMESPEC_ASSIGN(&ty, &schedule_time);
    ADDUSEC2TIMESPEC(lev->availCs,&ty);
    cap_timer = kern_event_post(&ty, capacity_timer, NULL);
  }

//  kern_printf("(disp %d %d)",ty.tv_sec, ty.tv_nsec);
}

static void PS_public_epilogue(LEVEL l, PID p)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);
  struct timespec ty;
  TIME tx;

  /* update the server capacity */
  if (lev->flags & PS_BACKGROUND)
    lev->flags &= ~PS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
  }

//  kern_printf("(epil %d %d)",lev->availCs, proc_table[p].avail_time);

  /* check if the server capacity is exhausted... */
  if (lev->availCs < 0) {
//    kern_printf("(epil Cs%d %d:%d act%d p%d)",
//              lev->availCs,proc_table[p].timespec_priority.tv_sec,
//              proc_table[p].timespec_priority.tv_nsec,
//              lev->activated,p);
    /* the server slice has finished... put the task back into the wait
       queue. A first version of the module used task_endcycle, but that
       was not conceptually correct: the task did not stop because it had
       finished its work, but because the server ran out of budget. If
       task_endcycle were called, the task would remain in the master
       level, and we could not wake it up if, for example, another task
       pointed its shadow to it! */
    if (lev->activated == p)
      level_table[ lev->scheduling_level ]->
        private_extract(lev->scheduling_level,p);
    iq_insertfirst(p, &lev->wait);
    proc_table[p].status = PS_WAIT;
    lev->activated = NIL;
  }
  else
    /* the task has been preempted. It returns to the ready queue or to the
       wait queue by calling the guest_epilogue... */
    if (lev->activated == p) {//kern_printf("Û1");
      level_table[ lev->scheduling_level ]->
        private_epilogue(lev->scheduling_level,p);
    } else { //kern_printf("Û2");
      iq_insertfirst(p, &lev->wait);
      proc_table[p].status = PS_WAIT;
    }
}

static void PS_public_activate(LEVEL l, PID p, struct timespec *t)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  if (lev->activated == p || proc_table[p].status == PS_WAIT) {
    if (lev->nact[p] != -1)
      lev->nact[p]++;
  }
  else if (proc_table[p].status == SLEEP) {

    if (lev->activated == NIL && lev->availCs > 0) {
      lev->activated = p;
      PS_activation(lev);
    }
    else {
      iq_insertlast(p, &lev->wait);
      proc_table[p].status = PS_WAIT;
    }
  }
  else {
    kern_printf("PS_REJ%d %d %d %d ",p, proc_table[p].status, lev->activated, lev->wait.first);
    return;
  }

}

static void PS_public_unblock(LEVEL l, PID p)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  lev->flags &= ~PS_BACKGROUND_BLOCK;

  lev->activated = NIL;

  /* when we reinsert the task into the system, the server capacity
     is always 0 because nobody has executed through the PS in the
     meantime... */
  iq_insertfirst(p, &lev->wait);
  proc_table[p].status = PS_WAIT;
}

static void PS_public_block(LEVEL l, PID p)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  /* update the server capacity */
  lev->availCs = 0;

  lev->flags |= PS_BACKGROUND_BLOCK;

  if (lev->activated == p)
    level_table[ lev->scheduling_level ]->
      private_extract(lev->scheduling_level,p);
}

static int PS_public_message(LEVEL l, PID p, void *m)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);
  struct timespec ty;
  TIME tx;

  /* update the server capacity */
  if (lev->flags & PS_BACKGROUND)
    lev->flags &= ~PS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
  }

  if (lev->activated == p)
    level_table[ lev->scheduling_level ]->
      private_extract(lev->scheduling_level,p);
  else
    iq_extract(p, &lev->wait);

  if (lev->nact[p] > 0)
  {
    lev->nact[p]--;
    iq_insertlast(p, &lev->wait);
    proc_table[p].status = PS_WAIT;
  }
  else
    proc_table[p].status = SLEEP;

  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated == NIL)
    lev->availCs = 0; /* see note (*) at the begin of the file */
  else
    PS_activation(lev);

  jet_update_endcycle(); /* Update the Jet data... */
  TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);

  return 0;
}
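
/*
 * Illustrative end-of-cycle trace (not part of the module): if a task ends
 * its cycle while nact[p] == 2, one saved activation is consumed, the task
 * goes back to the PS wait queue with nact[p] == 1, and the first waiting
 * task (possibly the same one) is promoted to lev->activated and inserted
 * into the master level; with nact[p] == 0 or -1 the task simply goes back
 * to SLEEP.
 */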

static void PS_public_end(LEVEL l, PID p)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);
  struct timespec ty;
  TIME tx;

  /* update the server capacity */
  if (lev->flags & PS_BACKGROUND)
    lev->flags &= ~PS_BACKGROUND;
  else {
    SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
    tx = TIMESPEC2USEC(&ty);
    lev->availCs -= tx;
  }

  if (lev->activated == p)
    level_table[ lev->scheduling_level ]->
      private_extract(lev->scheduling_level,p);

  proc_table[p].status = FREE;
  iq_insertfirst(p,&freedesc);

  lev->activated = iq_getfirst(&lev->wait);
  if (lev->activated == NIL)
    lev->availCs = 0; /* see note (*) at the begin of the file */
  else
    PS_activation(lev);
}

/* Registration functions */


/*+ This init function installs the PS deadline timer
    +*/
static void PS_dline_install(void *l)
{
  PS_level_des *lev = (PS_level_des *)(level_table[(LEVEL)l]);

  kern_gettime(&lev->lastdline);
  ADDUSEC2TIMESPEC(lev->period, &lev->lastdline);

  kern_event_post(&lev->lastdline, PS_deadline_timer, l);
}



/*+ Registration function:
    int flags                 the init flags ... see PS.h +*/
LEVEL PS_register_level(int flags, LEVEL master, int Cs, int per)
{
  LEVEL l;            /* the level that we register */
  PS_level_des *lev;  /* for readability only */
  PID i;              /* a counter */

  printk("PS_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor(sizeof(PS_level_des));

  lev = (PS_level_des *)level_table[l];

  printk("    lev=%d\n",(int)lev);

  /* fill the standard descriptor */

  if (flags & PS_ENABLE_BACKGROUND)
    lev->l.public_scheduler = PS_public_schedulerbackground;

  if (flags & PS_ENABLE_GUARANTEE_EDF)
    lev->l.public_guarantee = PS_public_guaranteeEDF;
  else if (flags & PS_ENABLE_GUARANTEE_RM)
    lev->l.public_guarantee = PS_public_guaranteeRM;
  else
    lev->l.public_guarantee = NULL;

  lev->l.public_create    = PS_public_create;
  lev->l.public_end       = PS_public_end;
  lev->l.public_dispatch  = PS_public_dispatch;
  lev->l.public_epilogue  = PS_public_epilogue;
  lev->l.public_activate  = PS_public_activate;
  lev->l.public_unblock   = PS_public_unblock;
  lev->l.public_block     = PS_public_block;
  lev->l.public_message   = PS_public_message;

  /* fill the PS descriptor part */

  for (i=0; i<MAX_PROC; i++)
     lev->nact[i] = -1;

  lev->Cs = Cs;
  lev->availCs = 0;

  lev->period = per;

  iq_init(&lev->wait, &freedesc, 0);
  lev->activated = NIL;

  lev->U = (MAX_BANDWIDTH / per) * Cs;

  lev->scheduling_level = master;

  lev->flags = flags & 0x07;

  sys_atrunlevel(PS_dline_install,(void *) l, RUNLEVEL_INIT);

  return l;
}
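
/*
 * Usage sketch (illustrative only; EDF_register_level() and the
 * EDF_ENABLE_ALL flag are assumed to come from the EDF module used as the
 * master level):
 *
 *   LEVEL edf_level = EDF_register_level(EDF_ENABLE_ALL);
 *
 *   // Polling Server with 2 ms of budget every 10 ms (20% of the CPU),
 *   // background execution and EDF on-line guarantee enabled.
 *   LEVEL ps_level  = PS_register_level(PS_ENABLE_BACKGROUND |
 *                                       PS_ENABLE_GUARANTEE_EDF,
 *                                       edf_level, 2000, 10000);
 */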

bandwidth_t PS_usedbandwidth(LEVEL l)
{
  PS_level_des *lev = (PS_level_des *)(level_table[l]);

  return lev->U;
}