Subversion Repositories shark

Rev

Rev 3 | Go to most recent revision | Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
2 pj 1
/*
2
 * Project: S.Ha.R.K.
3
 *
4
 * Coordinators:
5
 *   Giorgio Buttazzo    <giorgio@sssup.it>
6
 *   Paolo Gai           <pj@gandalf.sssup.it>
7
 *
8
 * Authors     :
9
 *   Paolo Gai           <pj@gandalf.sssup.it>
10
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
11
 *   Luca Abeni          <luca@gandalf.sssup.it>
12
 *   (see the web pages for full authors list)
13
 *
14
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
15
 *
16
 * http://www.sssup.it
17
 * http://retis.sssup.it
18
 * http://shark.sssup.it
19
 */
20
 
21
/**
22
 ------------
23
 CVS :        $Id: rr.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
24
 
25
 File:        $File$
26
 Revision:    $Revision: 1.1.1.1 $
27
 Last update: $Date: 2002-03-29 14:12:52 $
28
 ------------
29
 
30
 This file contains the scheduling module RR (Round Robin)
31
 
32
 Read rr.h for further details.
33
 
34
**/
35
 
36
/*
37
 * Copyright (C) 2000 Paolo Gai
38
 *
39
 * This program is free software; you can redistribute it and/or modify
40
 * it under the terms of the GNU General Public License as published by
41
 * the Free Software Foundation; either version 2 of the License, or
42
 * (at your option) any later version.
43
 *
44
 * This program is distributed in the hope that it will be useful,
45
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
46
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
47
 * GNU General Public License for more details.
48
 *
49
 * You should have received a copy of the GNU General Public License
50
 * along with this program; if not, write to the Free Software
51
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
52
 *
53
 */
54
 
55
 
56
#include <modules/rr.h>
57
#include <ll/stdio.h>
58
#include <ll/string.h>
59
#include <kernel/model.h>
60
#include <kernel/descr.h>
61
#include <kernel/var.h>
62
#include <kernel/func.h>
63
 
64
/*+ Status used in the level; both expansions are fully parenthesized
    so the macros stay safe in any expression context +*/
#define RR_READY   (MODULE_STATUS_BASE)
#define RR_DELAY   (MODULE_STATUS_BASE+1)
67
 
68
/*+ the level redefinition for the Round Robin level +*/
typedef struct {
  level_des l;     /*+ the standard level descriptor; must remain the
                       FIRST field so a RR_level_des* can be stored in
                       level_table[] and used as a level_des*         +*/

  QQUEUE ready;    /*+ the ready queue                        +*/

  int slice;       /*+ the level's time slice (clamped between
                       RR_MINIMUM_SLICE and RR_MAXIMUM_SLICE at
                       registration time)                     +*/

  struct multiboot_info *multiboot; /*+ used if the level have to insert
                                        the main task +*/
} RR_level_des;
79
 
80
 
81
/*+ Translate a task status code into a printable name; codes below
    MODULE_STATUS_BASE are generic and deferred to the kernel's
    status_to_a() +*/
static char *RR_status_to_a(WORD status)
{
  if (status < MODULE_STATUS_BASE)
    return status_to_a(status);

  if (status == RR_READY)
    return "RR_Ready";
  if (status == RR_DELAY)
    return "RR_Delay";
  return "RR_Unknown";
}
92
 
93
/*+ this function is called when a task finish his delay +*/
94
static void RR_timer_delay(void *par)
95
{
96
  PID p = (PID) par;
97
  RR_level_des *lev;
98
 
99
  lev = (RR_level_des *)level_table[proc_table[p].task_level];
100
 
101
  proc_table[p].status = RR_READY;
102
  qq_insertlast(p,&lev->ready);
103
 
104
  proc_table[p].delay_timer = NIL;  /* Paranoia */
105
 
106
//  kern_printf(" DELAY TIMER %d ", p);
107
 
108
  event_need_reschedule();
109
}
110
 
111
 
112
/*+ Accept only NRT task models addressed to this level (or to no
    level in particular); returns 0 on acceptance, -1 on refusal +*/
static int RR_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
  int accepted = (m->pclass == NRT_PCLASS) ||
                 (m->pclass == (NRT_PCLASS | l));

  return accepted ? 0 : -1;
}
119
 
120
/*+ The Round Robin level never hosts guest tasks: every guest model
    is refused +*/
static int RR_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
    return -1;
}
124
 
125
/*+ Dump the level state: the slice, every task in the ready queue,
    then every other non-free task belonging to this level +*/
static void RR_level_status(LEVEL l)
{
  RR_level_des *rrlev = (RR_level_des *)(level_table[l]);
  PID pid;

  kern_printf("Slice: %d \n", rrlev->slice);

  /* walk the ready queue */
  for (pid = qq_queryfirst(&rrlev->ready); pid != NIL;
       pid = proc_table[pid].next)
    kern_printf("Pid: %d\t Name: %20s Status: %s\n", pid,
                proc_table[pid].name,
                RR_status_to_a(proc_table[pid].status));

  /* then every task of this level that is neither ready nor free
     (e.g. sleeping or delaying tasks) */
  for (pid = 0; pid < MAX_PROC; pid++)
    if (proc_table[pid].task_level == l
        && proc_table[pid].status != RR_READY
        && proc_table[pid].status != FREE)
      kern_printf("Pid: %d\t Name: %20s Status: %s\n", pid,
                  proc_table[pid].name,
                  RR_status_to_a(proc_table[pid].status));
}
145
 
146
 
147
/* This is not efficient but very fair :-)
148
   The need of all this stuff is because if a task execute a long time
149
   due to (shadow!) priority inheritance, then the task shall go to the
150
   tail of the queue many times... */
151
static PID RR_level_scheduler(LEVEL l)
152
{
153
  RR_level_des *lev = (RR_level_des *)(level_table[l]);
154
 
155
  PID p;
156
 
157
  for (;;) {
158
    p = qq_queryfirst(&lev->ready);
159
    if (p == -1)
160
      return p;
161
 
162
    if (proc_table[p].avail_time <= 0) {
163
      proc_table[p].avail_time += proc_table[p].wcet;
164
      qq_extract(p,&lev->ready);
165
      qq_insertlast(p,&lev->ready);
166
    }
167
    else
168
      return p;
169
  }
170
}
171
 
172
/*+ Bandwidth guarantee hook: the RR level consumes no guaranteed
    bandwidth, so it always accepts, leaving *freebandwidth untouched +*/
static int RR_level_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  /* the RR level always guarantee... the function is defined because
     there can be an aperiodic server at a level with less priority than
     the RR that need guarantee (e.g., a TBS server) */
  return 1;
}
179
 
180
 
181
/*+ Per-task creation hook: initialize the capacity fields from the
    NRT model (the generic task_create has already set the task state
    to SLEEP) +*/
static int RR_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);
  NRT_TASK_MODEL *nrt = (NRT_TASK_MODEL *)m;

  /* The wcet field is reused (instead of a dedicated field) so the
     capacity accounting notices a task consuming more than its
     timeslice. A null slice in the model means "use the level's
     default slice". */
  proc_table[p].wcet       = nrt->slice ? nrt->slice : lev->slice;
  proc_table[p].avail_time = proc_table[p].wcet;

  proc_table[p].control   |= CONTROL_CAP;

  return 0; /* OK */
}
205
 
206
/*+ Nothing to undo at detach time: RR_task_create allocates no
    per-task resources +*/
static void RR_task_detach(LEVEL l, PID p)
{
  /* the RR level doesn't introduce any new field in the TASK_MODEL
     so, all detach stuffs are done by the task_create
     The task state is set at FREE by the general task_create */
}
212
 
213
/*+ Eligibility check after scheduling: an RR task never has to be
    rescheduled, so a chosen task is always accepted +*/
static int RR_task_eligible(LEVEL l, PID p)
{
  return 0; /* if the task p is chosen, it is always eligible */
}
217
 
218
#ifdef __TEST1__
219
extern int testactive;
220
extern struct timespec s_stime[];
221
extern TIME s_curr[];
222
extern TIME s_PID[];
223
extern int useds;
224
#endif
225
 
226
/*+ Dispatch hook: remove the task chosen by the scheduler from the
    ready queue (the scheduler() has already set its state to EXE) +*/
static void RR_task_dispatch(LEVEL l, PID p, int nostop)
{
  RR_level_des *rrlev = (RR_level_des *)(level_table[l]);

  /* NB: we can't assume that p is the first task in the queue!!! */
  qq_extract(p, &rrlev->ready);

  #ifdef __TEST1__
  /* optional tracing used by the __TEST1__ test harness */
  if (testactive) {
    TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time);
    s_curr[useds] = proc_table[p].avail_time;
    s_PID[useds]  = p;
    useds++;
  }
  #endif
}
248
 
249
/*+ Epilogue hook: re-queue the preempted task in the position that
    matches its remaining slice, then mark it ready +*/
static void RR_task_epilogue(LEVEL l, PID p)
{
  RR_level_des *rrlev = (RR_level_des *)(level_table[l]);

  if (proc_table[p].avail_time > 0) {
    /* the task still owns part of its slice: it goes back to the
       head of the queue to run for the remaining time */
    qq_insertfirst(p, &rrlev->ready);
  }
  else {
    /* slice exhausted: recharge it and queue the task at the tail */
    proc_table[p].avail_time += proc_table[p].wcet;
    qq_insertlast(p, &rrlev->ready);
  }

  proc_table[p].status = RR_READY;
}
265
 
266
/*+ Activation hook: record the request time and queue the task as
    ready; activating a non-sleeping task is silently ignored +*/
static void RR_task_activate(LEVEL l, PID p)
{
  RR_level_des *rrlev = (RR_level_des *)(level_table[l]);

  /* the task is already active: nothing to do */
  if (proc_table[p].status != SLEEP)
    return;

  ll_gettime(TIME_EXACT, &proc_table[p].request_time);

  proc_table[p].status = RR_READY;
  qq_insertlast(p, &rrlev->ready);
}
281
 
282
/*+ Insertion hook: like RR_task_activate, except that the task state
    is not checked and request_time is left untouched +*/
static void RR_task_insert(LEVEL l, PID p)
{
  RR_level_des *rrlev = (RR_level_des *)(level_table[l]);

  proc_table[p].status = RR_READY;
  qq_insertlast(p, &rrlev->ready);
}
293
 
294
/*+ Extraction hook: intentionally empty +*/
static void RR_task_extract(LEVEL l, PID p)
{
  /* Extract the running task from the level
     . we have already extract it from the ready queue at the dispatch time.
     . the capacity event have to be removed by the generic kernel
     . the wcet don't need modification...
     . the state of the task is set by the calling function

     So, we do nothing!!!
  */
}
305
 
306
/*+ End of an activation: like RR_task_extract (the task left the
    ready queue at dispatch time), but the task falls asleep +*/
static void RR_task_endcycle(LEVEL l, PID p)
{
  /* this function is equal to the RR_task_extract, except that
     the task fall asleep... */
  proc_table[p].status = SLEEP;
}
314
 
315
/*+ Task termination: mark the descriptor free and return it to the
    kernel's free-descriptor queue +*/
static void RR_task_end(LEVEL l, PID p)
{
  /* we insert the task in the free queue */
  proc_table[p].status = FREE;
  q_insert(p,&freedesc);
}
323
 
324
/*+ Sleep hook: the task was already extracted at dispatch time, so
    only the state change is needed +*/
static void RR_task_sleep(LEVEL l, PID p)
{
  proc_table[p].status = SLEEP;
}
328
 
329
/*+ Delay hook: suspend the task for usdelay microseconds, arming a
    one-shot kernel event that will wake it up +*/
static void RR_task_delay(LEVEL l, PID p, TIME usdelay)
{
  struct timespec wakeuptime;

  /* as in RR_task_endcycle the task leaves the ready set, but with a
     distinct status so a delaying task can be told apart */
  proc_table[p].status = RR_DELAY;

  /* compute the absolute wakeup time and post the event; the handle
     is stored so the event can be deleted if the task is killed
     while it sleeps */
  ll_gettime(TIME_EXACT, &wakeuptime);
  ADDUSEC2TIMESPEC(usdelay, &wakeuptime);
  proc_table[p].delay_timer =
    kern_event_post(&wakeuptime, RR_timer_delay, (void *)p);
}
344
 
345
 
346
static int RR_guest_create(LEVEL l, PID p, TASK_MODEL *m)
347
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; }
348
 
349
static void RR_guest_detach(LEVEL l, PID p)
350
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
351
 
352
static void RR_guest_dispatch(LEVEL l, PID p, int nostop)
353
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
354
 
355
static void RR_guest_epilogue(LEVEL l, PID p)
356
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
357
 
358
static void RR_guest_activate(LEVEL l, PID p)
359
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
360
 
361
static void RR_guest_insert(LEVEL l, PID p)
362
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
363
 
364
static void RR_guest_extract(LEVEL l, PID p)
365
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
366
 
367
static void RR_guest_endcycle(LEVEL l, PID p)
368
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
369
 
370
static void RR_guest_end(LEVEL l, PID p)
371
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
372
 
373
static void RR_guest_sleep(LEVEL l, PID p)
374
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
375
 
376
static void RR_guest_delay(LEVEL l, PID p,DWORD tickdelay)
377
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
378
 
379
 
380
 
381
 
382
/* Registration functions */
383
 
384
/*+ This init function install the "main" task +*/
385
static void RR_call_main(void *l)
386
{
387
  LEVEL lev;
388
  PID p;
389
  NRT_TASK_MODEL m;
390
  void *mb;
391
 
392
  lev = (LEVEL)l;
393
 
394
  nrt_task_default_model(m);
395
  nrt_task_def_level(m,lev); /* with this we are sure that the task arrives
396
                                to the correct level */
397
 
398
  mb = ((RR_level_des *)level_table[lev])->multiboot;
399
  nrt_task_def_arg(m,mb);
400
  nrt_task_def_usemath(m);
401
  nrt_task_def_nokill(m);
402
  nrt_task_def_ctrl_jet(m);
403
 
404
  p = task_create("Main", __init__, (TASK_MODEL *)&m, NULL);
405
 
406
  if (p == NIL)
407
    kern_printf("\nPanic!!! can't create main task... errno =%d\n",errno);
408
 
409
  RR_task_activate(lev,p);
410
}
411
 
412
 
413
/*+ Registration function:
    TIME slice                the slice for the Round Robin queue
                              (clamped to [RR_MINIMUM_SLICE,
                              RR_MAXIMUM_SLICE])
    int createmain            1 if the level creates the main task 0 otherwise
    struct multiboot_info *mb used if createmain specified   +*/
void RR_register_level(TIME slice,
                       int createmain,
                       struct multiboot_info *mb)
{
  LEVEL l;            /* the level that we register */
  RR_level_des *lev;  /* for readableness only */

  printk("RR_register_level\n");

  /* request an entry in the level_table */
  l = level_alloc_descriptor();

  /* alloc the space needed for the RR_level_des */
  lev = (RR_level_des *)kern_alloc(sizeof(RR_level_des));

  printk("    lev=%d\n",(int)lev);

  /* update the level_table with the new entry */
  level_table[l] = (level_des *)lev;

  /* fill the standard descriptor */
  /* NOTE(review): strncpy does not guarantee NUL termination when the
     name reaches MAX_LEVELNAME — presumably RR_LEVELNAME is shorter;
     confirm against the other modules' registration code */
  strncpy(lev->l.level_name,  RR_LEVELNAME, MAX_LEVELNAME);
  lev->l.level_code               = RR_LEVEL_CODE;
  lev->l.level_version            = RR_LEVEL_VERSION;

  lev->l.level_accept_task_model  = RR_level_accept_task_model;
  lev->l.level_accept_guest_model = RR_level_accept_guest_model;
  lev->l.level_status             = RR_level_status;
  lev->l.level_scheduler          = RR_level_scheduler;
  lev->l.level_guarantee          = RR_level_guarantee;

  lev->l.task_create              = RR_task_create;
  lev->l.task_detach              = RR_task_detach;
  lev->l.task_eligible            = RR_task_eligible;
  lev->l.task_dispatch            = RR_task_dispatch;
  lev->l.task_epilogue            = RR_task_epilogue;
  lev->l.task_activate            = RR_task_activate;
  lev->l.task_insert              = RR_task_insert;
  lev->l.task_extract             = RR_task_extract;
  lev->l.task_endcycle            = RR_task_endcycle;
  lev->l.task_end                 = RR_task_end;
  lev->l.task_sleep               = RR_task_sleep;
  lev->l.task_delay               = RR_task_delay;

  /* guest entries all raise XUNVALID_GUEST: RR hosts no guests */
  lev->l.guest_create             = RR_guest_create;
  lev->l.guest_detach             = RR_guest_detach;
  lev->l.guest_dispatch           = RR_guest_dispatch;
  lev->l.guest_epilogue           = RR_guest_epilogue;
  lev->l.guest_activate           = RR_guest_activate;
  lev->l.guest_insert             = RR_guest_insert;
  lev->l.guest_extract            = RR_guest_extract;
  lev->l.guest_endcycle           = RR_guest_endcycle;
  lev->l.guest_end                = RR_guest_end;
  lev->l.guest_sleep              = RR_guest_sleep;
  lev->l.guest_delay              = RR_guest_delay;

  /* fill the RR descriptor part */
  qq_init(&lev->ready);

  /* clamp the requested slice into the legal range */
  if (slice < RR_MINIMUM_SLICE) slice = RR_MINIMUM_SLICE;
  if (slice > RR_MAXIMUM_SLICE) slice = RR_MAXIMUM_SLICE;
  lev->slice      = slice;

  lev->multiboot  = mb;

  /* defer the main-task creation until the system reaches
     RUNLEVEL_INIT */
  if (createmain)
    sys_atrunlevel(RR_call_main,(void *) l, RUNLEVEL_INIT);
}
485
 
486