/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors:
 *      Giacomo Guidi    <giacomo@gandalf.sssup.it>
 *      Mauro Marinoni
 *      Anton Cervin
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>

#include <ll/i386/64bit.h>

#include <stdlib.h>

#include <elastic/elastic/elastic.h>

#include <tracer.h>

/* Task flags */

#define ELASTIC_PRESENT       1
#define ELASTIC_JOB_PRESENT   2

/* Task statuses */

#define ELASTIC_IDLE          APER_STATUS_BASE

//#define ELASTIC_DEBUG

#ifdef ELASTIC_DEBUG
char *pnow() {
  static char buf[40];
  struct timespec t;
  kern_gettime(&t);
  sprintf(buf, "%ld.%06ld", t.tv_sec, t.tv_nsec/1000);
  return buf;
}
char *ptime1(struct timespec *t) {
  static char buf[40];
  sprintf(buf, "%ld.%06ld", t->tv_sec, t->tv_nsec/1000);
  return buf;
}
char *ptime2(struct timespec *t) {
  static char buf[40];
  sprintf(buf, "%ld.%06ld", t->tv_sec, t->tv_nsec/1000);
  return buf;
}
#endif


typedef struct {

  /* Task parameters (set/changed by the user) */

  TIME Tmin;   /* The nominal (minimum) period */
  TIME Tmax;   /* The maximum tolerable period */
  TIME C;      /* The declared worst-case execution time */
  int  E;      /* The elasticity coefficient */
  int  beta;   /* PERIOD_SCALING or WCET_SCALING */

  /* Task variables (changed by the module) */

  struct timespec release;    /* The current activation time */
  struct timespec dline;      /* The current absolute deadline */
  int dltimer;                /* Deadline timer handle */

  ext_bandwidth_t Umax;       /* The maximum utilization, Umax = C/Tmin  */
  ext_bandwidth_t Umin;       /* The minimum utilization, Umin = C/Tmax  */

  ext_bandwidth_t U;          /* New assigned utilization             */
  ext_bandwidth_t oldU;       /* Old utilization                      */
  TIME T;                     /* The current period, T = C/U          */

  int  flags;

} ELASTIC_task_descr;

typedef struct {
  level_des l;     /*+ the standard level descriptor          +*/

  ext_bandwidth_t U;   /*+ the bandwidth reserved for elastic tasks  +*/

  int c_scaling_factor;   /*+ the computation time scaling factor +*/

  ELASTIC_task_descr elist[MAX_PROC];

  LEVEL scheduling_level;

  LEVEL current_level;

  int flags;

} ELASTIC_level_des;
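
/*
 * Note on the utilization encoding (illustrative, derived from the code
 * below): ext_bandwidth_t values in this module are fractions of
 * MAX_BANDWIDTH, computed as U = MAX_BANDWIDTH * C_scaled / T, where
 * C_scaled is the WCET adjusted by c_scaling_factor (see
 * ELASTIC_public_create and ELASTIC_set_period). For example, with the
 * default scaling factor (no adjustment), C = 2000 us and Tmin = 10000 us
 * give Umax = MAX_BANDWIDTH / 5, i.e. one fifth of the CPU.
 */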


static void ELASTIC_activation(ELASTIC_level_des *lev, PID p,
                               struct timespec *acttime)
{
  JOB_TASK_MODEL job;
  ELASTIC_task_descr *et = &lev->elist[p];

  /* Assign release time */
  et->release = *acttime;

  /* Assign absolute deadline */
  et->dline = *acttime;
  ADDUSEC2TIMESPEC(et->T, &et->dline);

#ifdef ELASTIC_DEBUG
  /*  cprintf("At %s: activating %s; rel=%s; dl=%s\n", pnow(), proc_table[p].name,
      ptime1(&et->release), ptime2(&et->dline)); */
#endif

  mul32div32to32(et->C,lev->c_scaling_factor,SCALING_UNIT,proc_table[p].avail_time);
  mul32div32to32(et->C,lev->c_scaling_factor,SCALING_UNIT,proc_table[p].wcet);

  /* Job insertion */
  job_task_default_model(job, et->dline);
  level_table[lev->scheduling_level]->
    private_insert(lev->scheduling_level, p, (TASK_MODEL *)&job);
  et->flags |= ELASTIC_JOB_PRESENT;
}
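
/*
 * A note on the budget recharge in ELASTIC_activation above (sketch,
 * assuming mul32div32to32(a,b,c,r) computes r = a * b / c): each activation
 * recharges the per-job budget (avail_time) and wcet to
 * C * c_scaling_factor / SCALING_UNIT. With the default
 * c_scaling_factor == SCALING_UNIT the budget is simply C; a larger factor
 * inflates it proportionally (e.g. 2 * SCALING_UNIT doubles it).
 */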


static void ELASTIC_timer_act(void *arg) {

  PID p = (PID)(arg);
  ELASTIC_level_des *lev;
  ELASTIC_task_descr *et;

  lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  et = &lev->elist[p];

  /* Use the current deadline as the new activation time */
  ELASTIC_activation(lev, p, &et->dline);

  event_need_reschedule();

  /* Next activation */
  et->dltimer = kern_event_post(&et->dline, ELASTIC_timer_act, (void *)(p));
}


/* Check feasibility and compute new utilizations for the task set */
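
/*
 * Compression rule implemented by ELASTIC_compress (elastic task model):
 * let Ud be the bandwidth reserved for elastic tasks (lev->U), Uf the total
 * utilization of the tasks that cannot shrink any further (E == 0 or already
 * at Umin), Umax the sum of the nominal utilizations of the remaining tasks
 * and Ev the sum of their elasticity coefficients. Each compressible task i
 * is then assigned
 *
 *     Ui = Umax_i - (Umax - (Ud - Uf)) * E_i / Ev
 *
 * and the loop repeats whenever some Ui saturates at its Umin_i.
 * Small worked example: with Ud = 0.8 and two tasks having Umax = 0.5,
 * E = 1 and a low Umin each, both are compressed to
 * 0.5 - (1.0 - 0.8) / 2 = 0.4.
 */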

static int ELASTIC_compress(ELASTIC_level_des *lev) {

  PID i;
  ELASTIC_task_descr *et;
  int ok;

  ext_bandwidth_t Umin;  // minimum utilization
  ext_bandwidth_t Umax;  // nominal (maximum) utilization of compressible tasks
  unsigned int temp;

  ext_bandwidth_t Uf;    // amount of non-compressible utilization
  int Ev;                // sum of elasticity among compressible tasks

  JOB_TASK_MODEL job;

  Umin = 0;
  Umax = 0;

  for (i=0; i<MAX_PROC; i++) {
    et = &lev->elist[i];
    if (et->flags & ELASTIC_PRESENT) {
      if (et->E == 0) {
        Umin += et->U;
        Umax += et->U;
      } else {
        Umin += et->Umin;
        Umax += et->Umax;
        et->U = et->Umax;   // reset previous saturations (if any)
      }
    }
  }

  if (Umin > lev->U) {
#ifdef ELASTIC_DEBUG
    cprintf("ELASTIC_compress: Task set not feasible\n");
#endif
    return -1;  // NOT FEASIBLE
  }

  if (Umax <= lev->U) {
#ifdef ELASTIC_DEBUG
    cprintf("ELASTIC_compress: Task set feasible with maximum utilizations\n");
#endif

  } else {

    do {
      Uf = 0;
      Ev = 0;
      Umax = 0;

      for (i=0; i<MAX_PROC; i++) {
        et = &lev->elist[i];
        if (et->flags & ELASTIC_PRESENT) {
          if (et->E == 0 || et->U == et->Umin) {
            Uf += et->U;
          } else {
            Ev += et->E;
            Umax += et->Umax;
          }
        }
      }

      ok = 1;

      for (i=0; i<MAX_PROC; i++) {
        et = &lev->elist[i];
        if (et->flags & ELASTIC_PRESENT) {
          if (et->E > 0 && et->U > et->Umin) {
            et->U = et->Umax - (Umax - lev->U + Uf) * et->E / Ev;
            if (et->U < et->Umin) {
              et->U = et->Umin;
              ok = 0;
            }
          }
        }
      }

    } while (ok == 0);
  }

  // Increase the periods of compressed tasks IMMEDIATELY.
  // The others will be changed at their next activation.

  for (i=0; i<MAX_PROC; i++) {
    et = &lev->elist[i];
    if (et->flags & ELASTIC_PRESENT) {
      if (et->U != et->oldU) {
        /* Utilization has been changed. Compute new period */
        temp = (long long)et->C * (long long)MAX_BANDWIDTH / et->U;
        mul32div32to32(temp,lev->c_scaling_factor,SCALING_UNIT,et->T);
      }
      if (et->U < et->oldU) {
        /* Task has been compressed. Change its deadline NOW! */
        if (et->flags & ELASTIC_JOB_PRESENT) {
          /* Remove job from level */
          level_table[lev->scheduling_level]->
            private_extract(lev->scheduling_level, i);
        }
        /* Compute new deadline */
        et->dline = et->release;
        ADDUSEC2TIMESPEC(et->T, &et->dline);
        if (et->dltimer != -1) {
          /* Delete old deadline timer, post new one */
          kern_event_delete(et->dltimer);
          et->dltimer = kern_event_post(&et->dline, ELASTIC_timer_act,(void *)(i));
        }
        if (et->flags & ELASTIC_JOB_PRESENT) {
          /* Reinsert job */
          job_task_default_model(job, et->dline);
          level_table[lev->scheduling_level]->
            private_insert(lev->scheduling_level, i, (TASK_MODEL *)&job);
        }
      }
      et->oldU = et->U;  /* Update oldU */
    }
  }

#ifdef ELASTIC_DEBUG
  cprintf("ELASTIC_compress: New periods: ");
  for (i=0; i<MAX_PROC; i++) {
    et = &lev->elist[i];
    if (et->flags & ELASTIC_PRESENT) {
      cprintf("%s:%d ", proc_table[i].name, (int)et->T);
    }
  }
  cprintf("\n");
#endif

  return 0; // FEASIBLE

}


/* The on-line guarantee is enabled only if the appropriate flag is set... */
static int ELASTIC_public_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);

  if (*freebandwidth >= lev->U) {
    *freebandwidth -= (unsigned int)lev->U;
    return 1;
  } else {
    return 0;
  }

}


static int ELASTIC_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
  ELASTIC_TASK_MODEL *elastic = (ELASTIC_TASK_MODEL *)m;
  ELASTIC_task_descr *et = &lev->elist[p];
  unsigned int temp;

  if (m->pclass != ELASTIC_PCLASS) return -1;
  if (m->level != 0 && m->level != l) return -1;

  if (elastic->C == 0) return -1;
  if (elastic->Tmin > elastic->Tmax) return -1;
  if (elastic->Tmax == 0) return -1;
  if (elastic->Tmin == 0) return -1;

  NULL_TIMESPEC(&(et->dline));
  et->Tmin = elastic->Tmin;
  et->Tmax = elastic->Tmax;
  et->C = elastic->C;
  et->E = elastic->E;
  et->beta = elastic->beta;

  mul32div32to32(elastic->C,lev->c_scaling_factor,SCALING_UNIT,temp);
  et->Umax = ((long long)MAX_BANDWIDTH * (long long)temp) / (long long)elastic->Tmin;
  et->Umin = ((long long)MAX_BANDWIDTH * (long long)temp) / (long long)elastic->Tmax;

  et->U = et->Umax;
  et->oldU = 0;
  et->T = et->Tmin;
  et->dltimer = -1;

  et->flags |= ELASTIC_PRESENT;
  if (ELASTIC_compress(lev) == -1) {
    et->flags &= ~ELASTIC_PRESENT;
#ifdef ELASTIC_DEBUG
    cprintf("ELASTIC_public_create: compression failed!\n");
#endif
    return -1;
  }

  mul32div32to32(et->C,lev->c_scaling_factor,SCALING_UNIT,proc_table[p].avail_time);
  mul32div32to32(et->C,lev->c_scaling_factor,SCALING_UNIT,proc_table[p].wcet);

  proc_table[p].control    |= CONTROL_CAP;

  return 0;
}


static void ELASTIC_public_detach(LEVEL l, PID p)
{
  //ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);

}

static int ELASTIC_public_eligible(LEVEL l, PID p)
{
  //ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);

  return 0;

}

static void ELASTIC_public_dispatch(LEVEL l, PID p, int nostop)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);

  level_table[ lev->scheduling_level ]->
    private_dispatch(lev->scheduling_level,p,nostop);

}

static void ELASTIC_public_epilogue(LEVEL l, PID p)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);

  /* check whether the WCET budget has been exhausted... */
  if (proc_table[p].avail_time <= 0) {

    TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,
                    (unsigned short int)proc_table[p].context,0);
    kern_raise(XWCET_VIOLATION,p);

  }

  level_table[lev->scheduling_level]->
      private_epilogue(lev->scheduling_level,p);

}

static void ELASTIC_public_activate(LEVEL l, PID p, struct timespec *t)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
  ELASTIC_task_descr *et = &lev->elist[p];

  /* activate the task only if it is in the SLEEP state */
  if (proc_table[p].status != SLEEP) {
    return;
  }

  ELASTIC_activation(lev,p,t);

  /* Next activation */
  et->dltimer = kern_event_post(&et->dline, ELASTIC_timer_act, (void *)(p));

}

static void ELASTIC_public_unblock(LEVEL l, PID p)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
  struct timespec acttime;

  kern_gettime(&acttime);

  ELASTIC_activation(lev,p,&acttime);

}

static void ELASTIC_public_block(LEVEL l, PID p)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
  ELASTIC_task_descr *et = &lev->elist[p];

  level_table[lev->scheduling_level]->
    private_extract(lev->scheduling_level,p);
  et->flags &= ~ELASTIC_JOB_PRESENT;

}

static int ELASTIC_public_message(LEVEL l, PID p, void *m)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
  ELASTIC_task_descr *et = &lev->elist[p];

  switch((long)(m)) {

    case (long)(NULL):

      level_table[lev->scheduling_level]->
        private_extract(lev->scheduling_level,p);
      et->flags &= ~ELASTIC_JOB_PRESENT;

      proc_table[p].status = ELASTIC_IDLE;

      jet_update_endcycle(); /* Update the Jet data... */
      TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);

      break;

    case 1:

      if (et->dltimer != -1)
        kern_event_delete(et->dltimer);

      if (et->flags & ELASTIC_JOB_PRESENT) {
        level_table[ lev->scheduling_level ]->
          private_extract(lev->scheduling_level,p);
        et->flags &= ~ELASTIC_JOB_PRESENT;
      }

      proc_table[p].status = SLEEP;

      TRACER_LOGEVENT(FTrace_EVT_task_disable,(unsigned short int)proc_table[p].context,(unsigned int)l);

      break;

  }

  return 0;

}

static void ELASTIC_public_end(LEVEL l, PID p)
{
  ELASTIC_level_des *lev = (ELASTIC_level_des *)(level_table[l]);
  ELASTIC_task_descr *et = &lev->elist[p];

  if (et->dltimer != -1) {
    kern_event_delete(et->dltimer);
  }

  if (et->flags & ELASTIC_JOB_PRESENT) {
    level_table[ lev->scheduling_level ]->
      private_extract(lev->scheduling_level,p);
    et->flags &= ~ELASTIC_JOB_PRESENT;
  }

  et->flags &= ~ELASTIC_PRESENT;

  ELASTIC_compress(lev); // Tasks may want to expand
}

/*+ Registration function +*/
LEVEL ELASTIC_register_level(int flags, LEVEL master, ext_bandwidth_t U)
{
  LEVEL l;                 /* the level that we register */
  ELASTIC_level_des *lev;  /* for readability only */
  PID i;

  printk("ELASTIC_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor(sizeof(ELASTIC_level_des));

  lev = (ELASTIC_level_des *)level_table[l];

  /* fill the standard descriptor */
  if (flags & ELASTIC_ENABLE_GUARANTEE)
    lev->l.public_guarantee = ELASTIC_public_guarantee;
  else
    lev->l.public_guarantee = NULL;
  lev->l.public_create    = ELASTIC_public_create;
  lev->l.public_detach    = ELASTIC_public_detach;
  lev->l.public_end       = ELASTIC_public_end;
  lev->l.public_eligible  = ELASTIC_public_eligible;
  lev->l.public_dispatch  = ELASTIC_public_dispatch;
  lev->l.public_epilogue  = ELASTIC_public_epilogue;
  lev->l.public_activate  = ELASTIC_public_activate;
  lev->l.public_unblock   = ELASTIC_public_unblock;
  lev->l.public_block     = ELASTIC_public_block;
  lev->l.public_message   = ELASTIC_public_message;

  /* fill the ELASTIC task descriptor part */
  for (i=0; i<MAX_PROC; i++) {
     NULL_TIMESPEC(&(lev->elist[i].dline));
     lev->elist[i].Tmin = 0;
     lev->elist[i].Tmax = 0;
     lev->elist[i].T = 0;
     lev->elist[i].U = 0;
     lev->elist[i].C = 0;
     lev->elist[i].E = 0;
     lev->elist[i].beta = 0;
     lev->elist[i].flags = 0;
  }

  lev->c_scaling_factor = SCALING_UNIT;

  lev->U = U;

  lev->scheduling_level = master;

  lev->current_level = l;

  lev->flags = 0;

  return l;
}
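
/*
 * Typical registration sketch (illustrative only; the names outside this
 * module are assumptions): in the application's __kernel_register_levels__()
 * a master level would be registered first and then passed to this module,
 * e.g.
 *
 *     LEVEL edf_level = EDF_register_level(EDF_ENABLE_ALL);
 *     ELASTIC_register_level(ELASTIC_ENABLE_GUARANTEE, edf_level,
 *                            MAX_BANDWIDTH / 2);
 *
 * which would reserve half of the CPU bandwidth for the elastic tasks
 * scheduled through the master level.
 */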



/* Force the period of task p to a given value (between Tmin and Tmax) */

int ELASTIC_set_period(PID p, TIME period) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev;
  ELASTIC_task_descr *et;
  int saveE;
  unsigned int temp;
  ext_bandwidth_t saveU;
  TIME saveT;

  f = kern_fsave();

  lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  et = &lev->elist[p];

  if (period < et->Tmin || period > et->Tmax) {
    kern_frestore(f);
    return -1;
  }

  saveE = et->E;
  saveU = et->U;
  saveT = et->T;

  et->E = 0;  /* set elasticity to zero to force period */
  mul32div32to32(et->C,lev->c_scaling_factor,SCALING_UNIT,temp);
  et->U = ((long long)MAX_BANDWIDTH * (long long)(temp))/((long long)period);
  et->T = period;

  if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
    cprintf("ELASTIC_set_period failed!\n");
#endif
    et->E = saveE;
    et->U = saveU;
    et->T = saveT;
    kern_frestore(f);
    return -1;
  }

  et->E = saveE;     /* Restore E when compression is done */
  kern_frestore(f);
  return 0;
}

int ELASTIC_get_period(PID p) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev;
  TIME retval;

  lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];

  f = kern_fsave();

  if (lev->elist[p].flags & ELASTIC_PRESENT) {
    retval = lev->elist[p].T;
    kern_frestore(f);
    return retval;

  } else {

    kern_frestore(f);
    return -1;

  }

}


int ELASTIC_set_Tmin(PID p, TIME Tmin)
{
  SYS_FLAGS f;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  ELASTIC_task_descr *et = &lev->elist[p];
  TIME saveTmin;
  TIME saveT;
  ext_bandwidth_t saveU;
  unsigned int temp;

  f = kern_fsave();

  if (et->flags & ELASTIC_PRESENT) {

    saveTmin = et->Tmin;
    saveT = et->T;
    saveU = et->U;

    et->Tmin = Tmin;
    if (Tmin > et->T) {
      et->T = Tmin;
      mul32div32to32(et->C,lev->c_scaling_factor,SCALING_UNIT,temp);
      et->U = ((long long)MAX_BANDWIDTH * (long long)(temp))/((long long)Tmin);
    }

    if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
      cprintf("ELASTIC_set_Tmin failed: could not compress\n");
#endif
      et->Tmin = saveTmin;
      et->T = saveT;
      et->U = saveU;
      kern_frestore(f);
      return -1;
    }

    kern_frestore(f);
    return 0;

  } else {

    kern_frestore(f);
    return -1;
  }
}


int ELASTIC_get_Tmin(PID p) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev;
  TIME retval;

  lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];

  f = kern_fsave();

  if (lev->elist[p].flags & ELASTIC_PRESENT) {
    retval = lev->elist[p].Tmin;
    kern_frestore(f);
    return retval;

  } else {

    kern_frestore(f);
    return -1;

  }

}


int ELASTIC_set_Tmax(PID p, TIME Tmax)
{
  SYS_FLAGS f;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  ELASTIC_task_descr *et = &lev->elist[p];
  TIME saveTmax;
  TIME saveT;
  ext_bandwidth_t saveU;
  unsigned int temp;

  f = kern_fsave();

  if (et->flags & ELASTIC_PRESENT) {

    saveTmax = et->Tmax;
    saveT = et->T;
    saveU = et->U;

    et->Tmax = Tmax;
    if (Tmax < et->T) {
      et->T = Tmax;
      mul32div32to32(et->C,lev->c_scaling_factor,SCALING_UNIT,temp);
      et->U = ((long long)MAX_BANDWIDTH * (long long)(temp))/((long long)Tmax);
    }

    if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
      cprintf("ELASTIC_set_Tmax failed: could not compress\n");
#endif
      et->Tmax = saveTmax;
      et->T = saveT;
      et->U = saveU;
      kern_frestore(f);
      return -1;
    }

    kern_frestore(f);
    return 0;

  } else {

    kern_frestore(f);
    return -1;
  }
}


int ELASTIC_get_Tmax(PID p) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev;
  TIME retval;

  lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];

  f = kern_fsave();

  if (lev->elist[p].flags & ELASTIC_PRESENT) {
    retval = lev->elist[p].Tmax;
    kern_frestore(f);
    return retval;

  } else {

    kern_frestore(f);
    return -1;

  }

}

int ELASTIC_set_C(PID p, TIME C)
{
  SYS_FLAGS f;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  ELASTIC_task_descr *et = &lev->elist[p];
  TIME saveC;
  ext_bandwidth_t saveU;
  unsigned int temp;

  f = kern_fsave();

  if (et->flags & ELASTIC_PRESENT) {

    saveC = et->C;
    saveU = et->U;

    et->C = C;

    mul32div32to32(et->C,lev->c_scaling_factor,SCALING_UNIT,temp);
    et->U = ((long long)MAX_BANDWIDTH * (long long)(temp))/((long long)et->T);

    if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
      cprintf("ELASTIC_set_C failed: could not compress\n");
#endif
      et->C = saveC;
      et->U = saveU;
      kern_frestore(f);
      return -1;
    }

    kern_frestore(f);
    return 0;

  } else {

    kern_frestore(f);
    return -1;
  }
}


int ELASTIC_get_C(PID p) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev;
  TIME retval;

  lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];

  f = kern_fsave();

  if (lev->elist[p].flags & ELASTIC_PRESENT) {
    retval = lev->elist[p].C;
    kern_frestore(f);
    return retval;

  } else {

    kern_frestore(f);
    return -1;

  }

}


int ELASTIC_set_E(PID p, int E)
{
  SYS_FLAGS f;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  ELASTIC_task_descr *et = &lev->elist[p];
  int saveE;

  f = kern_fsave();

  if (et->flags & ELASTIC_PRESENT) {

    saveE = et->E;

    et->E = E;
    if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
      cprintf("ELASTIC_set_E failed: could not compress\n");
#endif
      et->E = saveE;
      kern_frestore(f);
      return -1;
    }

    kern_frestore(f);
    return 0;

  } else {

    kern_frestore(f);
    return -1;
  }
}

int ELASTIC_get_E(PID p) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev;

  lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];

  f = kern_fsave();

  if (lev->elist[p].flags & ELASTIC_PRESENT) {

    kern_frestore(f);
    return lev->elist[p].E;

  } else {

    kern_frestore(f);
    return -1;
  }
}

int ELASTIC_set_beta(PID p, int beta) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  ELASTIC_task_descr *et = &lev->elist[p];
  int saveBeta;

  f = kern_fsave();

  if (et->flags & ELASTIC_PRESENT) {

    saveBeta = et->beta;

    et->beta = beta;

    if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
      cprintf("ELASTIC_set_beta failed: could not compress\n");
#endif
      et->beta = saveBeta;
      kern_frestore(f);
      return -1;
    }

    kern_frestore(f);
    return 0;

  } else {

    kern_frestore(f);
    return -1;

  }

}

int ELASTIC_get_beta(PID p) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[proc_table[p].task_level];
  int retval;

  f = kern_fsave();

  if (lev->elist[p].flags & ELASTIC_PRESENT) {
    retval = lev->elist[p].beta;
    kern_frestore(f);
    return retval;

  } else {

    kern_frestore(f);
    return -1;

  }

}

int ELASTIC_set_bandwidth(LEVEL level, ext_bandwidth_t U) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[level];

  f = kern_fsave();

  lev->U = U;

  if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
    cprintf("ELASTIC_set_bandwidth failed: could not compress\n");
#endif
    kern_frestore(f);
    return -1;
  }

  kern_frestore(f);
  return 0;

}

ext_bandwidth_t ELASTIC_get_bandwidth(LEVEL level) {

  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[level];

  return lev->U;

}

int ELASTIC_set_scaling_factor(LEVEL level, int scaling_factor) {

  SYS_FLAGS f;
  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[level];

  f = kern_fsave();

  lev->c_scaling_factor = scaling_factor;

  if (ELASTIC_compress(lev) == -1) {
#ifdef ELASTIC_DEBUG
    cprintf("ELASTIC_set_scaling_factor failed: could not compress\n");
#endif
    kern_frestore(f);
    return -1;
  }

  kern_frestore(f);
  return 0;

}

int ELASTIC_get_scaling_factor(LEVEL level) {

  ELASTIC_level_des *lev = (ELASTIC_level_des *)level_table[level];

  return lev->c_scaling_factor;

}