Subversion Repositories shark

Rev

Rev 14 | Rev 38 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors:
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
 *   Luca Abeni          <luca@gandalf.sssup.it>
 *   (see the web pages for the full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: rr.c,v 1.3 2002-11-11 08:32:06 pj Exp $

 File:        $File$
 Revision:    $Revision: 1.3 $
 Last update: $Date: 2002-11-11 08:32:06 $
 ------------

 This file contains the scheduling module RR (Round Robin).

 Read rr.h for further details.

**/

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
54
 
55
 
56
#include <modules/rr.h>
57
#include <ll/stdio.h>
58
#include <ll/string.h>
59
#include <kernel/model.h>
60
#include <kernel/descr.h>
61
#include <kernel/var.h>
62
#include <kernel/func.h>
63
 
64
/*+ Task status introduced by this level: a task queued in the RR ready
    queue (value is relative to MODULE_STATUS_BASE) +*/
#define RR_READY   MODULE_STATUS_BASE

/*+ the level redefinition for the Round Robin level +*/
typedef struct {
  level_des l;     /*+ the standard level descriptor (must be first)  +*/

  IQUEUE ready;    /*+ the ready queue                                +*/

  int slice;       /*+ the level's default time slice, used when a
                       task model does not specify its own            +*/

  struct multiboot_info *multiboot; /*+ used if the level has to insert
                                        the main task +*/
} RR_level_des;
78
 
79
 
80
/* Map a task status code to a printable name; statuses below
   MODULE_STATUS_BASE are generic kernel statuses and are delegated to
   the kernel's own status_to_a(). */
static char *RR_status_to_a(WORD status)
{
  if (status < MODULE_STATUS_BASE)
    return status_to_a(status);

  if (status == RR_READY)
    return "RR_Ready";

  return "RR_Unknown";
}
90
 
91
/* Accept only NRT task models, either generic or explicitly bound to
   this level; return 0 on acceptance, -1 on refusal. */
static int RR_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
  int is_nrt = (m->pclass == NRT_PCLASS) || (m->pclass == (NRT_PCLASS | l));

  return is_nrt ? 0 : -1;
}
98
 
99
/* The RR level never hosts guest tasks: refuse every guest model. */
static int RR_level_accept_guest_model(LEVEL l, TASK_MODEL *m)
{
  return -1;
}
103
 
104
/* Debug dump of the level: print the slice, then every task in the
   ready queue, then every other task owned by this level that is
   neither ready nor free. */
static void RR_level_status(LEVEL l)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);
  PID p;

  kern_printf("Slice: %d \n", lev->slice);

  /* walk the ready queue in order */
  for (p = iq_query_first(&lev->ready);
       p != NIL;
       p = iq_query_next(p, &lev->ready))
    kern_printf("Pid: %d\t Name: %20s Status: %s\n", p, proc_table[p].name,
                RR_status_to_a(proc_table[p].status));

  /* then scan the whole process table for the remaining tasks of this
     level (blocked, sleeping, ...) */
  for (p = 0; p < MAX_PROC; p++)
    if (proc_table[p].task_level == l && proc_table[p].status != RR_READY
        && proc_table[p].status != FREE)
      kern_printf("Pid: %d\t Name: %20s Status: %s\n", p, proc_table[p].name,
                  RR_status_to_a(proc_table[p].status));
}
124
 
125
 
126
/* This is not efficient but very fair :-)
127
   The need of all this stuff is because if a task execute a long time
128
   due to (shadow!) priority inheritance, then the task shall go to the
129
   tail of the queue many times... */
130
static PID RR_level_scheduler(LEVEL l)
131
{
132
  RR_level_des *lev = (RR_level_des *)(level_table[l]);
133
 
134
  PID p;
135
 
136
  for (;;) {
29 pj 137
    p = iq_query_first(&lev->ready);
2 pj 138
    if (p == -1)
139
      return p;
140
 
141
    if (proc_table[p].avail_time <= 0) {
142
      proc_table[p].avail_time += proc_table[p].wcet;
29 pj 143
      iq_extract(p,&lev->ready);
144
      iq_insertlast(p,&lev->ready);
2 pj 145
    }
146
    else
147
      return p;
148
  }
149
}
150
 
151
/* The RR level always guarantees; the function exists only because an
   aperiodic server registered at a lower-priority level (e.g., a TBS
   server) may still need the guarantee chain to continue. */
static int RR_level_guarantee(LEVEL l, bandwidth_t *freebandwidth)
{
  return 1;
}
158
 
159
 
160
/* Create a task at this level.  The generic task_create already set the
   task state to SLEEP; here we only initialize the capacity fields.
   The wcet field is (re)used as the per-task time slice, so that the
   accounting also notices a task consuming more than its slice. */
static int RR_task_create(LEVEL l, PID p, TASK_MODEL *m)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);
  NRT_TASK_MODEL *nrt = (NRT_TASK_MODEL *)m;

  /* a zero slice in the model means "use the level default" */
  int slice = nrt->slice ? nrt->slice : lev->slice;

  proc_table[p].avail_time = slice;
  proc_table[p].wcet       = slice;
  proc_table[p].control   |= CONTROL_CAP;

  return 0; /* OK */
}
184
 
185
/* Nothing to release: the RR level adds no per-task state beyond the
   TASK_MODEL, and the generic kernel sets the task state to FREE. */
static void RR_task_detach(LEVEL l, PID p)
{
}
191
 
192
/* A task chosen by the scheduler is always eligible to run. */
static int RR_task_eligible(LEVEL l, PID p)
{
  return 0;
}
196
 
197
/* Dispatch task p.  The scheduler() already set the task state to EXE;
   here we only remove it from the ready queue.
   NB: we can't assume that p is the first task in the queue!!! */
static void RR_task_dispatch(LEVEL l, PID p, int nostop)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);

  iq_extract(p, &lev->ready);
}
206
 
207
/* Preemption epilogue: requeue the task, at the tail if its slice is
   exhausted (after replenishing it), at the head otherwise, then mark
   it ready again. */
static void RR_task_epilogue(LEVEL l, PID p)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);

  if (proc_table[p].avail_time <= 0) {
    /* slice finished: refill it and go to the back of the queue */
    proc_table[p].avail_time += proc_table[p].wcet;
    iq_insertlast(p, &lev->ready);
  } else {
    /* some slice is left, so the task must run again first */
    iq_insertfirst(p, &lev->ready);
  }

  proc_table[p].status = RR_READY;
}
223
 
224
/* Activate task p: record the request time, mark it ready and queue it
   at the tail of the ready queue.  Activating a task that is not
   sleeping is silently ignored (it is already active). */
static void RR_task_activate(LEVEL l, PID p)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);

  if (proc_table[p].status != SLEEP)
    return;

  ll_gettime(TIME_EXACT, &proc_table[p].request_time);

  proc_table[p].status = RR_READY;
  iq_insertlast(p, &lev->ready);
}
239
 
240
/* Re-insert task p into the ready queue.  Similar to RR_task_activate,
   but without the state check and without touching request_time. */
static void RR_task_insert(LEVEL l, PID p)
{
  RR_level_des *lev = (RR_level_des *)(level_table[l]);

  proc_table[p].status = RR_READY;
  iq_insertlast(p, &lev->ready);
}
251
 
252
/* Extract the running task from the level.  Nothing to do here:
   - the task was already removed from the ready queue at dispatch time;
   - the capacity event is removed by the generic kernel;
   - wcet needs no modification;
   - the task state is set by the calling function. */
static void RR_task_extract(LEVEL l, PID p)
{
}
263
 
264
/* End of a job: same as RR_task_extract, except that the task falls
   asleep. */
static void RR_task_endcycle(LEVEL l, PID p)
{
  proc_table[p].status = SLEEP;
}
272
 
273
/* Task termination: mark the descriptor FREE and return it to the
   global free-descriptor queue. */
static void RR_task_end(LEVEL l, PID p)
{
  proc_table[p].status = FREE;
  iq_insertlast(p, &freedesc);
}
281
 
282
/* Put task p to sleep; nothing else to do at this level. */
static void RR_task_sleep(LEVEL l, PID p)
{
  proc_table[p].status = SLEEP;
}
286
 
287
/* Guest operations: the RR level does not support guest tasks (see
   RR_level_accept_guest_model), so every guest entry point raises
   XINVALID_GUEST on the current task. */

static int RR_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{
  kern_raise(XINVALID_GUEST, exec_shadow);
  return 0;
}

static void RR_guest_detach(LEVEL l, PID p)
{
  kern_raise(XINVALID_GUEST, exec_shadow);
}

static void RR_guest_dispatch(LEVEL l, PID p, int nostop)
{
  kern_raise(XINVALID_GUEST, exec_shadow);
}

static void RR_guest_epilogue(LEVEL l, PID p)
{
  kern_raise(XINVALID_GUEST, exec_shadow);
}

static void RR_guest_activate(LEVEL l, PID p)
{
  kern_raise(XINVALID_GUEST, exec_shadow);
}

static void RR_guest_insert(LEVEL l, PID p)
{
  kern_raise(XINVALID_GUEST, exec_shadow);
}

static void RR_guest_extract(LEVEL l, PID p)
{
  kern_raise(XINVALID_GUEST, exec_shadow);
}

static void RR_guest_endcycle(LEVEL l, PID p)
{
  kern_raise(XINVALID_GUEST, exec_shadow);
}

static void RR_guest_end(LEVEL l, PID p)
{
  kern_raise(XINVALID_GUEST, exec_shadow);
}

static void RR_guest_sleep(LEVEL l, PID p)
{
  kern_raise(XINVALID_GUEST, exec_shadow);
}
2 pj 316
 
317
 
318
 
319
 
320
/* Registration functions */
321
 
322
/*+ This init function install the "main" task +*/
323
static void RR_call_main(void *l)
324
{
325
  LEVEL lev;
326
  PID p;
327
  NRT_TASK_MODEL m;
328
  void *mb;
329
 
330
  lev = (LEVEL)l;
331
 
332
  nrt_task_default_model(m);
333
  nrt_task_def_level(m,lev); /* with this we are sure that the task arrives
334
                                to the correct level */
335
 
336
  mb = ((RR_level_des *)level_table[lev])->multiboot;
337
  nrt_task_def_arg(m,mb);
338
  nrt_task_def_usemath(m);
339
  nrt_task_def_nokill(m);
340
  nrt_task_def_ctrl_jet(m);
341
 
342
  p = task_create("Main", __init__, (TASK_MODEL *)&m, NULL);
343
 
344
  if (p == NIL)
345
    kern_printf("\nPanic!!! can't create main task... errno =%d\n",errno);
346
 
347
  RR_task_activate(lev,p);
348
}
349
 
350
 
351
/*+ Registration function:
352
    TIME slice                the slice for the Round Robin queue
353
    int createmain            1 if the level creates the main task 0 otherwise
354
    struct multiboot_info *mb used if createmain specified   +*/
355
void RR_register_level(TIME slice,
356
                       int createmain,
357
                       struct multiboot_info *mb)
358
{
359
  LEVEL l;            /* the level that we register */
360
  RR_level_des *lev;  /* for readableness only */
361
 
362
  printk("RR_register_level\n");
363
 
364
  /* request an entry in the level_table */
365
  l = level_alloc_descriptor();
366
 
367
  /* alloc the space needed for the RR_level_des */
368
  lev = (RR_level_des *)kern_alloc(sizeof(RR_level_des));
369
 
370
  printk("    lev=%d\n",(int)lev);
371
 
372
  /* update the level_table with the new entry */
373
  level_table[l] = (level_des *)lev;
374
 
375
  /* fill the standard descriptor */
376
  strncpy(lev->l.level_name,  RR_LEVELNAME, MAX_LEVELNAME);
377
  lev->l.level_code               = RR_LEVEL_CODE;
378
  lev->l.level_version            = RR_LEVEL_VERSION;
379
 
380
  lev->l.level_accept_task_model  = RR_level_accept_task_model;
381
  lev->l.level_accept_guest_model = RR_level_accept_guest_model;
382
  lev->l.level_status             = RR_level_status;
383
  lev->l.level_scheduler          = RR_level_scheduler;
384
  lev->l.level_guarantee          = RR_level_guarantee;
385
 
386
  lev->l.task_create              = RR_task_create;
387
  lev->l.task_detach              = RR_task_detach;
388
  lev->l.task_eligible            = RR_task_eligible;
389
  lev->l.task_dispatch            = RR_task_dispatch;
390
  lev->l.task_epilogue            = RR_task_epilogue;
391
  lev->l.task_activate            = RR_task_activate;
392
  lev->l.task_insert              = RR_task_insert;
393
  lev->l.task_extract             = RR_task_extract;
394
  lev->l.task_endcycle            = RR_task_endcycle;
395
  lev->l.task_end                 = RR_task_end;
396
  lev->l.task_sleep               = RR_task_sleep;
397
 
398
  lev->l.guest_create             = RR_guest_create;
399
  lev->l.guest_detach             = RR_guest_detach;
400
  lev->l.guest_dispatch           = RR_guest_dispatch;
401
  lev->l.guest_epilogue           = RR_guest_epilogue;
402
  lev->l.guest_activate           = RR_guest_activate;
403
  lev->l.guest_insert             = RR_guest_insert;
404
  lev->l.guest_extract            = RR_guest_extract;
405
  lev->l.guest_endcycle           = RR_guest_endcycle;
406
  lev->l.guest_end                = RR_guest_end;
407
  lev->l.guest_sleep              = RR_guest_sleep;
408
 
409
  /* fill the RR descriptor part */
29 pj 410
  iq_init(&lev->ready, &freedesc, 0);
2 pj 411
 
412
  if (slice < RR_MINIMUM_SLICE) slice = RR_MINIMUM_SLICE;
413
  if (slice > RR_MAXIMUM_SLICE) slice = RR_MAXIMUM_SLICE;
414
  lev->slice      = slice;
415
 
416
  lev->multiboot  = mb;
417
 
418
  if (createmain)
419
    sys_atrunlevel(RR_call_main,(void *) l, RUNLEVEL_INIT);
420
}
421
 
422