/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors     :
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
 *   Luca Abeni          <luca@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: pc.c,v 1.6 2003-11-06 09:42:23 giacomo Exp $

 File:        $File$
 Revision:    $Revision: 1.6 $
 Last update: $Date: 2003-11-06 09:42:23 $
 ------------

 Priority Ceiling protocol. See pc.h for more details...

**/

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */


#include <modules/pc.h>

#include <ll/ll.h>
#include <ll/string.h>
#include <ll/stdio.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/var.h>
#include <kernel/func.h>

typedef struct PC_mutexstruct_t PC_mutex_t;

/* The PC resource level descriptor */
typedef struct {
  mutex_resource_des m;     /*+ the mutex interface +*/

  int nlocked[MAX_PROC];    /*+ how many mutexes a task currently locks +*/

  PC_mutex_t *mlist;        /*+ the list of the busy mutexes +*/
  DWORD priority[MAX_PROC]; /*+ the PC priority of the tasks in the system +*/

  PID blocked[MAX_PROC];

} PC_mutex_resource_des;

/* this is the structure normally pointed to by the opt field in the
   mutex_t structure */
struct PC_mutexstruct_t {
  PID owner;
  int nblocked;
  PID firstblocked;

  DWORD ceiling;
  PC_mutex_t *next;
  PC_mutex_t *prev;
};

/* This is the test done when a task tries to lock a mutex.
   It checks whether the task's priority is strictly higher than the current
   system ceiling, i.e. the ceiling of the highest-ceiling busy mutex owned
   by another task (numerically, prio < ceiling).
   It returns 1 if the task can lock the mutex, 0 otherwise */
static int PC_accept(PC_mutex_resource_des *lev, DWORD prio)
{
  PC_mutex_t *l = lev->mlist;

  while (l) {
    if (l->owner != exec_shadow)
      /* l points to a mutex owned by another task. Its ceiling is the
         system ceiling... */
      return prio < l->ceiling;

    l = l->next;
  }

  /* no busy mutexes other than mine!!! */
  return 1;
}
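
/* Worked example for PC_accept() (illustrative only, assuming the usual
   S.Ha.R.K. convention that a numerically smaller DWORD means a higher
   priority or ceiling): if the running task has priority 5 and another task
   holds a mutex with ceiling 8, the test returns (5 < 8) = 1 and the lock
   can be granted; if that mutex has ceiling 3 instead, the test returns
   (5 < 3) = 0 and PC_lock() will block the caller on lev->mlist. */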

/* this function inserts a mutex into the busy-mutex list, ordered by
   increasing ceiling value; the code is similar to q_insert of queue.c */
static void PC_insert(PC_mutex_resource_des *lev, PC_mutex_t * m)
{
    DWORD prio;
    PC_mutex_t *p, *q;

    p = NULL;
    q = lev->mlist;
    prio = m->ceiling;

    while ((q != NULL) && (prio >= q->ceiling)) {
        p = q;
        q = q->next;
    }

    if (p != NULL)
      p->next = m;
    else
      lev->mlist = m;

    if (q != NULL) q->prev = m;

    m->next = q;
    m->prev = p;
}

/* this function removes a mutex from the busy-mutex list;
   the code is similar to q_extract of queue.c */
static void PC_extract(PC_mutex_resource_des *lev, PC_mutex_t * m)
{
    PC_mutex_t *p, *q;

    //kern_printf("extract: prev=%d next = %d\n",m->prev, m->next);
    p = m->prev;
    q = m->next;

    if (p == NULL) lev->mlist = q;
    else p->next = m->next;

    if (q != NULL) q->prev = m->prev;
}


#if 0
/*+ print resource protocol statistics...+*/
static void PC_resource_status(RLEVEL r)
{
  PC_mutex_resource_des *m = (PC_mutex_resource_des *)(resource_table[r]);
  PID i;

  kern_printf("Resources owned by the tasks:\n");
  for (i=0; i<MAX_PROC; i++) {
     kern_printf("%-4d", m->nlocked[i]);
  }

  kern_printf("\nPC priority of the tasks:\n");
  for (i=0; i<MAX_PROC; i++) {
     kern_printf("%-4ld", m->priority[i]);
  }
  // in the future: print the status of the blocked semaphores!

}
#endif

static int PC_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  PC_mutex_resource_des *m = (PC_mutex_resource_des *)(resource_table[l]);
  PC_RES_MODEL *pc;

  if (r->rclass != PC_RCLASS)
    return -1;
  if (r->level && r->level != l)
    return -1;

  pc = (PC_RES_MODEL *)r;

  m->priority[p] = pc->priority;
  m->nlocked[p] = 0;

  return 0;
}

static void PC_res_detach(RLEVEL l, PID p)
{
  PC_mutex_resource_des *m = (PC_mutex_resource_des *)(resource_table[l]);

  if (m->nlocked[p])
    kern_raise(XMUTEX_OWNER_KILLED, p);
  else
    m->nlocked[p] = 0;

  m->priority[p] = MAX_DWORD;
}

static int PC_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  PC_mutex_t *p;

  if (a->mclass != PC_MCLASS)
    return -1;

  p = (PC_mutex_t *) kern_alloc(sizeof(PC_mutex_t));

  /* check if there is enough memory; no check for an init on a
     non-destroyed mutex */

  if (!p)
    return (ENOMEM);

  p->owner = NIL;
  p->nblocked = 0;
  p->firstblocked = NIL;

  p->ceiling = ((PC_mutexattr_t *)a)->ceiling;
  p->next = NULL;
  p->prev = NULL;

  m->mutexlevel = l;
  m->opt = (void *)p;

  return 0;
}


static int PC_destroy(RLEVEL l, mutex_t *m)
{
//  PC_mutex_resource_des *lev = (PC_mutex_resource_des *)(resource_table[l]);
  SYS_FLAGS f;

  if ( ((PC_mutex_t *)m->opt)->nblocked)
    return (EBUSY);

  f = kern_fsave();
  if (m->opt) {
    kern_free(m->opt,sizeof(PC_mutex_t));
    m->opt = NULL;
  }
  kern_frestore(f);

  return 0;
}

/* see pi.c for information on the blocking algorithm used */
static int PC_lock(RLEVEL l, mutex_t *m)
{
  PC_mutex_resource_des *lev = (PC_mutex_resource_des *)(resource_table[l]);
  PC_mutex_t *p;
  SYS_FLAGS f;

  f = kern_fsave();

  p = (PC_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, return an error! */
    kern_frestore(f);
    return (EINVAL);
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_frestore(f);
    return (EDEADLK);
  }

  if (p->ceiling > lev->priority[exec_shadow]) {
    /* see POSIX standard p. 258 */
    kern_frestore(f);
    return (EINVAL);
  }

  while (!PC_accept(lev, lev->priority[exec_shadow])) {
    /* the mutex is locked by someone,
       or another mutex with a greater ceiling is busy:
       "block" the task on the busy mutex with the highest ceiling
       (pointed to by lev->mlist)...*/

    //kern_printf("Blocking on %d, owner=%d, exec_shadow=%d\n",lev->mlist,lev->mlist->owner,exec_shadow);
    proc_table[exec_shadow].shadow = lev->mlist->owner;
    lev->blocked[exec_shadow] = lev->mlist->firstblocked;
    lev->mlist->firstblocked = exec_shadow;
    lev->mlist->nblocked++;

    /* ... call the scheduler... */
    scheduler();
    //kern_printf("schedule: exec=%d, exec_shadow=%d\n",exec,exec_shadow);
    kern_context_load(proc_table[exec_shadow].context);

    /* ... and reacquire the cli() before the test... */
    kern_cli();
  }

  /* the mutex is free, we can lock it! */
  lev = (PC_mutex_resource_des *)(resource_table[l]);
  lev->nlocked[exec_shadow]++;

  p->owner = exec_shadow;

  PC_insert(lev, p);

  kern_frestore(f);

  return 0;
}
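
/* Note on the blocking scheme used above (an informal sketch; pi.c documents
   the same mechanism): when a task B fails the PC_accept() test, PC_lock()
   sets proc_table[B].shadow to the owner A of the highest-ceiling busy mutex,
   so the scheduler transparently executes A whenever B is selected. This is
   what gives the protocol its priority-inheritance effect. PC_unlock() later
   resets the shadow of every blocked task and calls the scheduler again. */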

static int PC_trylock(RLEVEL l, mutex_t *m)
{
  PC_mutex_resource_des *lev = (PC_mutex_resource_des *)(resource_table[l]);
  PC_mutex_t *p;
  SYS_FLAGS f;

  f = kern_fsave();

  p = (PC_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, return an error! */
    kern_frestore(f);
    return (EINVAL);
  }

  if (p->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_frestore(f);
    return (EDEADLK);
  }

  if (p->ceiling > lev->priority[exec_shadow]) {
    /* see POSIX standard p. 258 (same check as in PC_lock) */
    kern_frestore(f);
    return (EINVAL);
  }

  while (!PC_accept(lev, lev->priority[exec_shadow])) {
    /* a task already owns the mutex */
    kern_frestore(f);
    return (EBUSY);
  }

  /* the mutex is free, we can lock it! */
  lev = (PC_mutex_resource_des *)(resource_table[l]);
  lev->nlocked[exec_shadow]++;

  p->owner = exec_shadow;

  PC_insert(lev, p);

  kern_frestore(f);

  return 0;
}

static int PC_unlock(RLEVEL l, mutex_t *m)
{
  PC_mutex_resource_des *lev;
  PC_mutex_t *p;
  int i, j;

  p = (PC_mutex_t *)m->opt;
  if (!p)
    return (EINVAL);

  if (p->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    kern_sti();
    return (EPERM);
  }

  proc_table[exec_shadow].context = kern_context_save();

  /* the mutex is mine */
  lev = (PC_mutex_resource_des *)(resource_table[l]);
  lev->nlocked[exec_shadow]--;

  p->owner = NIL;

  /* we unblock all the waiting tasks... */
  i = p->firstblocked;
  p->firstblocked = NIL;

  while (i != NIL) {
    proc_table[i].shadow = j = i;
    i = lev->blocked[i];
    lev->blocked[j] = NIL;
  }
  p->nblocked = 0;

  PC_extract(lev, p);

/*  {
   int xxx;
   kern_printf("(PC_unlock owner=%d ",p->owner);
   for (xxx = 0; xxx<5; xxx++) kern_printf("p%d s%d|",xxx, proc_table[xxx].shadow);
   kern_printf(")\n");
  }*/

  scheduler();
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}

RLEVEL PC_register_module(void)
{
  RLEVEL l;                  /* the level that we register */
  PC_mutex_resource_des *m;  /* for readability only */
  PID i;                     /* a counter */

  printk("PC_register_module\n");

  /* request an entry in the level_table */
  l = resource_alloc_descriptor();

  /* alloc the space needed for the PC_mutex_resource_des */
  m = (PC_mutex_resource_des *)kern_alloc(sizeof(PC_mutex_resource_des));

  /* update the level_table with the new entry */
  resource_table[l] = (resource_des *)m;

  /* fill the resource_des descriptor */
  m->m.r.rtype                       = MUTEX_RTYPE;
  m->m.r.res_register                = PC_res_register;
  m->m.r.res_detach                  = PC_res_detach;

  /* fill the mutex_resource_des descriptor */
  m->m.init                          = PC_init;
  m->m.destroy                       = PC_destroy;
  m->m.lock                          = PC_lock;
  m->m.trylock                       = PC_trylock;
  m->m.unlock                        = PC_unlock;

  /* fill the PC_mutex_resource_des descriptor */
  for (i=0; i<MAX_PROC; i++)
    m->nlocked[i] = 0, m->priority[i] = MAX_DWORD, m->blocked[i] = NIL;

  m->mlist = NULL;

  return l;

}
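
#if 0
/* Minimal usage sketch, not compiled: a hedged illustration of how a PC mutex
   is typically created and used once PC_register_module() has been called at
   system initialization and each task has been given a PC_RES_MODEL (whose
   `priority' field fills lev->priority[] through PC_res_register()). It
   assumes the generic mutex_init()/mutex_lock()/mutex_unlock()/mutex_destroy()
   kernel calls, which dispatch to the PC_* functions registered above; check
   pc.h for the actual attribute initialization macros, since filling the
   PC_mutexattr_t by hand as done here is only illustrative. */
static void PC_usage_sketch(void)
{
  mutex_t        mx;
  PC_mutexattr_t a;

  a.mclass  = PC_MCLASS;  /* class checked by PC_init() */
  a.ceiling = 10;         /* ceiling = priority of the highest-priority user of mx */

  mutex_init(&mx, (mutexattr_t *)&a);  /* ends up in PC_init()    */

  mutex_lock(&mx);                     /* ends up in PC_lock()    */
  /* ... critical section ... */
  mutex_unlock(&mx);                   /* ends up in PC_unlock()  */

  mutex_destroy(&mx);                  /* ends up in PC_destroy() */
}
#endif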

/*+ This function gets the ceiling of a PC mutex; it has to be called
    only by a task that owns the mutex.
    Returns -1 if the mutex is not a PC mutex, 0 otherwise +*/
int PC_get_mutex_ceiling(const mutex_t *mutex, DWORD *ceiling)
{
  resource_des *r;

  if (!mutex)
    return -1;

  r = resource_table[mutex->mutexlevel];

  if (ceiling)
    *ceiling = ((PC_mutex_t *)mutex->opt)->ceiling;
  else
    return -1;

  return 0;
}

/*+ This function sets the ceiling of a PC mutex; it has to be called
    only by a task that owns the mutex.
    Returns -1 if the mutex is not a PC mutex, 0 otherwise +*/
int PC_set_mutex_ceiling(mutex_t *mutex, DWORD ceiling, DWORD *old_ceiling)
{
  resource_des *r;

  if (!mutex)
    return -1;

  r = resource_table[mutex->mutexlevel];

  if (old_ceiling)
    *old_ceiling = ((PC_mutex_t *)mutex->opt)->ceiling;

  ((PC_mutex_t *)mutex->opt)->ceiling = ceiling;
  return 0;
}
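
/* Usage sketch (illustrative only): a task that owns a PC mutex `mx' can
   temporarily change its ceiling and restore it later:

     DWORD old;
     PC_set_mutex_ceiling(&mx, new_ceiling, &old);
     ...
     PC_set_mutex_ceiling(&mx, old, NULL);

   Here `mx' and `new_ceiling' are placeholders for an application mutex_t
   and the desired ceiling value. */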

void PC_set_task_ceiling(RLEVEL r, PID p, DWORD priority)
{
  PC_mutex_resource_des *m = (PC_mutex_resource_des *)(resource_table[r]);
  m->priority[p] = priority;
}