/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors     :
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
 *   Luca Abeni          <luca@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: pc.c,v 1.3 2003-09-12 10:10:41 giacomo Exp $

 File:        $File$
 Revision:    $Revision: 1.3 $
 Last update: $Date: 2003-09-12 10:10:41 $
 ------------

 Priority Ceiling protocol. see pc.h for more details...

**/

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <modules/pc.h>

#include <ll/ll.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/var.h>
#include <kernel/func.h>

typedef struct PC_mutexstruct_t PC_mutex_t;

/* The PC resource level descriptor */
typedef struct {
  mutex_resource_des m;     /*+ the mutex interface +*/

  int nlocked[MAX_PROC];    /*+ how many mutexes a task currently locks +*/

  PC_mutex_t *mlist;        /*+ the list of the busy mutexes +*/
  DWORD priority[MAX_PROC]; /*+ the PC priority of the tasks in the system +*/

  PID blocked[MAX_PROC];

} PC_mutex_resource_des;

/* this is the structure normally pointed to by the opt field in the
   mutex_t structure */
struct PC_mutexstruct_t {
  PID owner;
  int nblocked;
  PID firstblocked;

  DWORD ceiling;
  PC_mutex_t *next;
  PC_mutex_t *prev;
};

/* This is the test done when a task tries to lock a mutex.
   It checks whether the task's priority is strictly higher than the
   system ceiling (the ceiling of the busy mutexes owned by other tasks);
   note that a numerically smaller value means a higher priority.
   It returns 1 if the task can lock the mutex, 0 otherwise */
static int PC_accept(PC_mutex_resource_des *lev, DWORD prio)
{
  PC_mutex_t *l = lev->mlist;

  while (l) {
    if (l->owner != exec_shadow)
      /* l points to a mutex owned by another task. Its ceiling is the
         system ceiling... */
      return prio < l->ceiling;

    l = l->next;
  }

  /* no busy mutexes other than mine!!! */
  return 1;
}
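
/* A brief illustration of the admission test above (values are only an
   example): if the running task has PC priority 2 and the only busy mutex
   owned by another task has ceiling 4, PC_accept() returns 1 (2 < 4) and
   the lock can be granted; if that mutex had ceiling 2 instead, 2 < 2
   fails, PC_accept() returns 0 and PC_lock() below blocks the caller on
   the owner of that mutex. */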

/* this function inserts a mutex into the busy mutex list, keeping the
   list ordered by increasing ceiling value.
   the code is similar to q_insert of queue.c */
static void PC_insert(PC_mutex_resource_des *lev, PC_mutex_t * m)
{
    DWORD prio;
    PC_mutex_t *p, *q;

    p = NULL;
    q = lev->mlist;
    prio = m->ceiling;

    while ((q != NULL) && (prio >= q->ceiling)) {
        p = q;
        q = q->next;
    }

    if (p != NULL)
      p->next = m;
    else
      lev->mlist = m;

    if (q != NULL) q->prev = m;

    m->next = q;
    m->prev = p;
}

/* this function extracts a mutex from the busy mutex list.
   the code is similar to q_extract of queue.c */
static void PC_extract(PC_mutex_resource_des *lev, PC_mutex_t * m)
{
    PC_mutex_t *p, *q;

    //kern_printf("extract: prev=%d next = %d\n",m->prev, m->next);
    p = m->prev;
    q = m->next;

    if (p == NULL) lev->mlist = q;
    else p->next = m->next;

    if (q != NULL) q->prev = m->prev;
}
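
/* Example of the ordering maintained above (values are only illustrative):
   inserting busy mutexes with ceilings 3, 1 and 2 yields the list
   1 -> 2 -> 3, so lev->mlist always points to the busy mutex with the
   smallest ceiling value; since smaller values denote higher priorities,
   this is the mutex that the comments in PC_lock() call the busy mutex
   "with the highest ceiling". */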
153
 
154
 
38 pj 155
#if 0
2 pj 156
/*+ print resource protocol statistics...+*/
157
static void PC_resource_status(RLEVEL r)
158
{
159
  PC_mutex_resource_des *m = (PC_mutex_resource_des *)(resource_table[r]);
160
  PID i;
161
 
162
  kern_printf("Resources owned by the tasks:\n");
163
  for (i=0; i<MAX_PROC; i++) {
164
     kern_printf("%-4d", m->nlocked[i]);
165
  }
166
 
167
  kern_printf("\nPC priority of the tasks:\n");
168
  for (i=0; i<MAX_PROC; i++) {
169
     kern_printf("%-4ld", m->priority[i]);
170
  }
171
  // in the future: print the status of the blocked semaphores!
172
 
173
}
38 pj 174
#endif

static int PC_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  PC_mutex_resource_des *m = (PC_mutex_resource_des *)(resource_table[l]);
  PC_RES_MODEL *pc;

  if (r->rclass != PC_RCLASS)
    return -1;
  if (r->level && r->level != l)
    return -1;

  pc = (PC_RES_MODEL *)r;

  m->priority[p] = pc->priority;
  m->nlocked[p] = 0;

  return 0;
}

static void PC_res_detach(RLEVEL l, PID p)
{
  PC_mutex_resource_des *m = (PC_mutex_resource_des *)(resource_table[l]);

  if (m->nlocked[p])
    kern_raise(XMUTEX_OWNER_KILLED, p);
  else
    m->nlocked[p] = 0;

  m->priority[p] = MAX_DWORD;
}

static int PC_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  PC_mutex_t *p;

  if (a->mclass != PC_MCLASS)
    return -1;

  p = (PC_mutex_t *) kern_alloc(sizeof(PC_mutex_t));

  /* check that there is enough memory; initializing an already
     initialized (non-destroyed) mutex is not detected */

  if (!p)
    return (ENOMEM);

  p->owner = NIL;
  p->nblocked = 0;
  p->firstblocked = NIL;

  p->ceiling = ((PC_mutexattr_t *)a)->ceiling;
  p->next = NULL;
  p->prev = NULL;

  m->mutexlevel = l;
  m->opt = (void *)p;

  return 0;
}


static int PC_destroy(RLEVEL l, mutex_t *m)
{
//  PC_mutex_resource_des *lev = (PC_mutex_resource_des *)(resource_table[l]);

  if ( ((PC_mutex_t *)m->opt)->nblocked)
    return (EBUSY);

  kern_cli();
  if (m->opt) {
    kern_free(m->opt,sizeof(PC_mutex_t));
    m->opt = NULL;
  }
  kern_sti();

  return 0;
}

/* See pi.c for information on the blocking algorithm used: a task that
   cannot lock the mutex "blocks" by pointing its shadow field to the
   owner of the busy mutex, so the owner is executed in its place until
   the mutex is released. */
static int PC_lock(RLEVEL l, mutex_t *m)
{
  PC_mutex_resource_des *lev = (PC_mutex_resource_des *)(resource_table[l]);
  PC_mutex_t *p;

  kern_cli();

  p = (PC_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, return an error! */
    kern_sti();
    return (EINVAL);
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_sti();
    return (EDEADLK);
  }

  if (p->ceiling > lev->priority[exec_shadow]) {
    /* the task's priority is higher than the mutex ceiling;
       see POSIX standard p. 258 */
    kern_sti();
    return (EINVAL);
  }

  while (!PC_accept(lev, lev->priority[exec_shadow])) {
    /* the mutex is locked by someone,
       or another mutex with greater ceiling is busy,
       "block" the task on the busy mutex with the highest ceiling
       (pointed by lev->mlist)...*/

    //kern_printf("Blocking on %d, owner=%d, exec_shadow=%d\n",lev->mlist,lev->mlist->owner,exec_shadow);
    proc_table[exec_shadow].shadow = lev->mlist->owner;
    lev->blocked[exec_shadow] = lev->mlist->firstblocked;
    lev->mlist->firstblocked = exec_shadow;
    lev->mlist->nblocked++;

    /* ... call the scheduler... */
    scheduler();
    //kern_printf("schedule: exec=%d, exec_shadow=%d\n",exec,exec_shadow);
    kern_context_load(proc_table[exec_shadow].context);

    /* ... and reacquire the cli() before the test... */
    kern_cli();
  }

  /* the mutex is free, we can lock it! */
  lev = (PC_mutex_resource_des *)(resource_table[l]);
  lev->nlocked[exec_shadow]++;

  p->owner = exec_shadow;

  PC_insert(lev, p);

  kern_sti();

  return 0;
}
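
/* A short trace of the blocking behaviour above (names and values are only
   an example): suppose task B owns a busy mutex whose ceiling is 1, and
   task A, with PC priority 1, calls PC_lock() on another PC mutex.
   PC_accept() fails (1 < 1 is false), so A's shadow is set to B and B is
   executed in A's place; when B calls PC_unlock(), every task blocked on
   that mutex gets its shadow pointed back to itself, the scheduler runs
   again and A repeats the admission test. */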

static int PC_trylock(RLEVEL l, mutex_t *m)
{
  PC_mutex_resource_des *lev = (PC_mutex_resource_des *)(resource_table[l]);
  PC_mutex_t *p;

  kern_cli();

  p = (PC_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, return an error! */
    kern_sti();
    return (EINVAL);
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_sti();
    return (EDEADLK);
  }

  if (p->ceiling > lev->priority[exec_shadow]) {
    /* same check as in PC_lock(); see POSIX standard p. 258 */
    kern_sti();
    return (EINVAL);
  }

  if (!PC_accept(lev, lev->priority[exec_shadow])) {
    /* the lock cannot be granted now (the mutex, or another busy mutex
       with a higher ceiling, is owned by another task): do not block,
       just return */
    kern_sti();
    return (EBUSY);
  }

  /* the mutex is free, we can lock it! */
  lev = (PC_mutex_resource_des *)(resource_table[l]);
  lev->nlocked[exec_shadow]++;

  p->owner = exec_shadow;

  PC_insert(lev, p);

  kern_sti();

  return 0;
}

static int PC_unlock(RLEVEL l, mutex_t *m)
{
  PC_mutex_resource_des *lev;
  PC_mutex_t *p;
  int i, j;

  p = (PC_mutex_t *)m->opt;
  if (!p)
    return (EINVAL);

  if (p->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    kern_sti();
    return (EPERM);
  }

  proc_table[exec_shadow].context = kern_context_save();

  /* the mutex is mine */
  lev = (PC_mutex_resource_des *)(resource_table[l]);
  lev->nlocked[exec_shadow]--;

  p->owner = NIL;

  /* we unblock all the waiting tasks... */
  i = p->firstblocked;
  p->firstblocked = NIL;

  while (i != NIL) {
    proc_table[i].shadow = j = i;
    i = lev->blocked[i];
    lev->blocked[j] = NIL;
  }
  p->nblocked = 0;

  PC_extract(lev, p);

/*  {
   int xxx;
   kern_printf("(PC_unlock owner=%d ",p->owner);
   for (xxx = 0; xxx<5; xxx++) kern_printf("p%d s%d|",xxx, proc_table[xxx].shadow);
   kern_printf(")\n");
  }*/

  scheduler();
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}

RLEVEL PC_register_module(void)
{
  RLEVEL l;                  /* the level that we register */
  PC_mutex_resource_des *m;  /* for readability only */
  PID i;                     /* a counter */

  printk("PC_register_module\n");

  /* request an entry in the level_table */
  l = resource_alloc_descriptor();

  /* alloc the space needed for the PC_mutex_resource_des */
  m = (PC_mutex_resource_des *)kern_alloc(sizeof(PC_mutex_resource_des));

  /* update the level_table with the new entry */
  resource_table[l] = (resource_des *)m;

  /* fill the resource_des descriptor */
  m->m.r.rtype                       = MUTEX_RTYPE;
  m->m.r.res_register                = PC_res_register;
  m->m.r.res_detach                  = PC_res_detach;

  /* fill the mutex_resource_des descriptor */
  m->m.init                          = PC_init;
  m->m.destroy                       = PC_destroy;
  m->m.lock                          = PC_lock;
  m->m.trylock                       = PC_trylock;
  m->m.unlock                        = PC_unlock;

  /* fill the PC_mutex_resource_des descriptor */
  for (i=0; i<MAX_PROC; i++)
    m->nlocked[i] = 0, m->priority[i] = MAX_DWORD, m->blocked[i] = NIL;

  m->mlist = NULL;

  return l;

}
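
/* The block below is only an illustrative sketch of how this module is
   typically used from an application; it is not part of the module. It
   assumes the usual S.Ha.R.K. initialization file, the kernel mutex_init()
   primitive and a PC_mutexattr_t that can be filled directly with the mutex
   class and the ceiling; the real attribute layout and the helper macros
   are defined in pc.h. */
#if 0
#include <kernel/kern.h>
#include <modules/pc.h>

static mutex_t pc_mutex;

/* register the PC module together with the scheduling modules */
TIME __kernel_register_levels__(void *arg)
{
  /* ... scheduling levels are registered here ... */
  PC_register_module();
  return 300;   /* example system tick */
}

/* create a PC mutex whose ceiling is 1 (an example value) */
static void create_pc_mutex(void)
{
  PC_mutexattr_t attr;

  attr.mclass  = PC_MCLASS;   /* assumed field names, see pc.h */
  attr.ceiling = 1;

  mutex_init(&pc_mutex, (mutexattr_t *)&attr);
}
#endif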

/*+ This function gets the ceiling of a PC mutex, and it has to be called
    only by a task that owns the mutex.
    Returns -1 if the mutex is not a PC mutex, 0 otherwise +*/
int PC_get_mutex_ceiling(const mutex_t *mutex, DWORD *ceiling)
{
  resource_des *r;

  if (!mutex)
    return -1;

  r = resource_table[mutex->mutexlevel];

  if (ceiling)
    *ceiling = ((PC_mutex_t *)mutex->opt)->ceiling;
  else
    return -1;

  return 0;
}

/*+ This function sets the ceiling of a PC mutex, and it has to be called
    only by a task that owns the mutex.
    Returns -1 if the mutex is not a PC mutex, 0 otherwise +*/
int PC_set_mutex_ceiling(mutex_t *mutex, DWORD ceiling, DWORD *old_ceiling)
{
  resource_des *r;

  if (!mutex)
    return -1;

  r = resource_table[mutex->mutexlevel];

  if (old_ceiling)
    *old_ceiling = ((PC_mutex_t *)mutex->opt)->ceiling;

  ((PC_mutex_t *)mutex->opt)->ceiling = ceiling;
  return 0;
}
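
/* Illustrative sketch (not part of the module): "pcm" is assumed to be a
   mutex already initialized as a PC mutex and owned by the calling task;
   the new ceiling value 2 is only an example. */
#if 0
static void change_ceiling_example(mutex_t *pcm)
{
  DWORD current, old_ceiling;

  if (PC_get_mutex_ceiling(pcm, &current) == 0)
    PC_set_mutex_ceiling(pcm, 2, &old_ceiling);
}
#endif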

void PC_set_task_ceiling(RLEVEL r, PID p, DWORD priority)
{
  PC_mutex_resource_des *m = (PC_mutex_resource_des *)(resource_table[r]);
  m->priority[p] = priority;
}