Subversion Repositories shark

Rev

Rev 2 | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2 pj 1
/*
2
 * Project: S.Ha.R.K.
3
 *
4
 * Coordinators:
5
 *   Giorgio Buttazzo    <giorgio@sssup.it>
6
 *   Paolo Gai           <pj@gandalf.sssup.it>
7
 *
8
 * Authors     :
9
 *   Paolo Gai           <pj@gandalf.sssup.it>
10
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
11
 *   Luca Abeni          <luca@gandalf.sssup.it>
12
 *   (see the web pages for full authors list)
13
 *
14
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
15
 *
16
 * http://www.sssup.it
17
 * http://retis.sssup.it
18
 * http://shark.sssup.it
19
 */
20
 
21
/**
22
 ------------
23
 CVS :        $Id: pi.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
24
 
25
 File:        $File$
26
 Revision:    $Revision: 1.1.1.1 $
27
 Last update: $Date: 2002-03-29 14:12:52 $
28
 ------------
29
 
30
 Priority Inheritance protocol. See pi.h for more details...
31
 
32
**/
33
 
34
/*
35
 * Copyright (C) 2000 Paolo Gai
36
 *
37
 * This program is free software; you can redistribute it and/or modify
38
 * it under the terms of the GNU General Public License as published by
39
 * the Free Software Foundation; either version 2 of the License, or
40
 * (at your option) any later version.
41
 *
42
 * This program is distributed in the hope that it will be useful,
43
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
44
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
45
 * GNU General Public License for more details.
46
 *
47
 * You should have received a copy of the GNU General Public License
48
 * along with this program; if not, write to the Free Software
49
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
50
 *
51
 */
52
 
53
 
54
#include <modules/pi.h>
55
 
56
#include <ll/ll.h>
57
#include <ll/string.h>
58
#include <ll/stdio.h>
59
#include <modules/codes.h>
60
#include <kernel/const.h>
61
#include <sys/types.h>
62
#include <kernel/descr.h>
63
#include <kernel/var.h>
64
#include <kernel/func.h>
65
 
66
/* The PI resource level descriptor */
typedef struct {
  mutex_resource_des m;   /*+ the mutex interface +*/

  int nlocked[MAX_PROC];  /*+ how many mutexes each task currently locks;
                              used by PI_res_detach to detect a task dying
                              while owning a mutex +*/

  PID blocked[MAX_PROC];  /*+ per-task "next" link: together with
                              PI_mutex_t.firstblocked this forms one
                              NIL-terminated singly linked list of blocked
                              tasks per mutex +*/
} PI_mutex_resource_des;
74
 
75
 
76
/* this is the structure normally pointed by the opt field in the
   mutex_t structure */
typedef struct {
  PID owner;         /* task currently owning the mutex, NIL if free */
  int nblocked;      /* number of tasks blocked on this mutex */
  PID firstblocked;  /* head of the blocked-task list (links are kept in
                        PI_mutex_resource_des.blocked[]) */
} PI_mutex_t;
83
 
84
 
85
 
86
/*+ print resource protocol statistics...+*/
87
static void PI_resource_status(RLEVEL r)
88
{
89
  PI_mutex_resource_des *m = (PI_mutex_resource_des *)(resource_table[r]);
90
  PID i;
91
 
92
  kern_printf("Resources owned by the tasks:\n");
93
  for (i=0; i<MAX_PROC; i++) {
94
     kern_printf("%-4d", m->nlocked[i]);
95
  }
96
}
97
 
98
 
99
static int PI_level_accept_resource_model(RLEVEL l, RES_MODEL *r)
{
  /* Priority inheritance works with all tasks and needs no per-task
     resource parameters, so every resource model is refused. */
  return -1;
}
104
 
105
static void PI_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  /* never called: PI_level_accept_resource_model() always returns -1,
     so no task can ever be registered at this level */
}
109
 
110
/* Called when a task leaves the system: a task is not allowed to die
   while still owning a PI mutex. */
static void PI_res_detach(RLEVEL l, PID p)
{
  PI_mutex_resource_des *lev = (PI_mutex_resource_des *)(resource_table[l]);

  if (lev->nlocked[p] != 0)
    kern_raise(XMUTEX_OWNER_KILLED, p);
}
117
 
118
static int PI_level_accept_mutexattr(RLEVEL l, const mutexattr_t *a)
{
  /* accept only attributes of the PI mutex class, either plain or
     tagged with this level number */
  return (a->mclass == PI_MCLASS || a->mclass == (PI_MCLASS | l)) ? 0 : -1;
}
125
 
126
/* Allocate and initialize the PI-specific part of a mutex.
   Returns 0 on success, ENOMEM if the descriptor cannot be allocated. */
static int PI_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  PI_mutex_t *p = (PI_mutex_t *)kern_alloc(sizeof(PI_mutex_t));

  /* check that the allocation succeeded; note that no check is made
     against re-initializing a non-destroyed mutex */
  if (p == NULL)
    return (ENOMEM);

  p->owner        = NIL;
  p->nblocked     = 0;
  p->firstblocked = NIL;

  m->mutexlevel = l;
  m->opt        = (void *)p;

  return 0;
}
147
 
148
 
149
/* Destroy a PI mutex, freeing its descriptor.
   Returns EBUSY if tasks are still blocked on it, 0 otherwise.

   BUG FIX: the original code dereferenced ((PI_mutex_t *)m->opt)->nblocked
   before checking m->opt for NULL, crashing on a mutex that was never
   initialized (opt may legitimately be NULL: PI_lock/PI_trylock
   lazily initialize on first use). */
static int PI_destroy(RLEVEL l, mutex_t *m)
{
  PI_mutex_t *p = (PI_mutex_t *)m->opt;

  /* never initialized (or already destroyed): nothing to free */
  if (!p)
    return 0;

  if (p->nblocked)
    return (EBUSY);

  kern_cli();
  if (m->opt) {
    kern_free(m->opt, sizeof(PI_mutex_t));
    m->opt = NULL;
  }
  kern_sti();

  return 0;
}
165
 
166
/* Note that in this approach, when unlocking we can't wake up only
167
   one thread, but we have to wake up all the blocked threads, because there
168
   is not a concept of priority between the task... Each woken thread have
169
   to retest he condition.
170
   Normally, they retest it only one time, because if many threads are
171
   unblocked, they are scheduled basing on their priority (unkown in this
172
   module!)... and if the slice is greather than the critical sections,
173
   they never block!
174
   */
175
static int PI_lock(RLEVEL l, mutex_t *m)
176
{
177
  PI_mutex_resource_des *lev = (PI_mutex_resource_des *)(resource_table[l]);
178
  PI_mutex_t *p;
179
//  return 0;
180
  kern_cli();
181
 
182
  p = (PI_mutex_t *)m->opt;
183
  if (!p) {
184
    /* if the mutex is not initialized, initialize it! */
185
    PI_mutexattr_t a;
186
    PI_mutexattr_default(a);
187
    PI_init(l, m, &a);
188
  }
189
 
190
 
191
  if (p->owner == exec_shadow) {
192
    /* the task already owns the mutex */
193
    kern_sti();
194
    return (EDEADLK);
195
  }
196
 
197
  while (p->owner != NIL) {
198
    /* the mutex is locked by someone, "block" the task ...*/
199
    proc_table[exec_shadow].shadow = p->owner;
200
    lev->blocked[exec_shadow] = p->firstblocked;
201
    p->firstblocked = exec_shadow;
202
    p->nblocked++;
203
//    kern_printf("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<");
204
    /* ... call the scheduler... */
205
    scheduler();
206
    kern_context_load(proc_table[exec_shadow].context);
207
 
208
    /* ... and reaquire the cli() before the test... */
209
    kern_cli();
210
  }
211
 
212
  /* the mutex is free, We can lock it! */
213
  lev->nlocked[exec_shadow]++;
214
 
215
  p->owner = exec_shadow;
216
 
217
  kern_sti();
218
 
219
  return 0;
220
}
221
 
222
/* Non-blocking lock attempt: returns 0 on success, EBUSY if the mutex
   is already owned.

   BUG FIX (same as PI_lock): after the lazy PI_init() the local p was
   not reloaded from m->opt, so the following p->owner test dereferenced
   a stale NULL pointer. */
static int PI_trylock(RLEVEL l, mutex_t *m)
{
  PI_mutex_t *p;

  kern_cli();

  p = (PI_mutex_t *)m->opt;
  if (!p) {
    /* the mutex was not initialized: initialize it on first use */
    PI_mutexattr_t a;
    PI_mutexattr_default(a);
    PI_init(l, m, &a);

    /* reload the opt pointer just set by PI_init() */
    p = (PI_mutex_t *)m->opt;
    if (!p) {
      /* PI_init() failed (out of memory) */
      kern_sti();
      return (ENOMEM);
    }
  }

  if (p->owner != NIL) {
    /* a task already owns the mutex */
    kern_sti();
    return (EBUSY);
  }
  else {
    /* the mutex is free: lock it */
    PI_mutex_resource_des *lev = (PI_mutex_resource_des *)(resource_table[l]);
    lev->nlocked[exec_shadow]++;

    p->owner = exec_shadow;

    kern_sti();
    return 0;
  }
}
252
 
253
/* Release a PI mutex owned by the running task and wake up every task
   blocked on it (see the note above PI_lock: with no notion of priority
   in this module, all waiters are released and each one retests the
   condition).
   Returns EINVAL if the mutex was never initialized, EPERM if the caller
   is not the owner, 0 on success.

   NOTE(review): the EPERM path calls kern_sti() although this function
   never calls kern_cli(); presumably the caller enters with interrupts
   disabled — confirm against the generic mutex layer. The EINVAL path
   returns without kern_sti(), which looks asymmetric — verify. */
static int PI_unlock(RLEVEL l, mutex_t *m)
{
  PI_mutex_resource_des *lev;
  PI_mutex_t *p;
  int i, j;

  p = (PI_mutex_t *)m->opt;
  if (!p)
    return (EINVAL);

  if (p->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    kern_sti();
    return (EPERM);
  }

  proc_table[exec_shadow].context = kern_context_save();

  /* the mutex is mine: drop ownership and the lock count */
  lev = (PI_mutex_resource_des *)(resource_table[l]);
  lev->nlocked[exec_shadow]--;

  p->owner = NIL;

  /* we unblock all the waiting tasks: walk the blocked list, resetting
     each task's shadow to itself (undoing the priority inheritance set
     up in PI_lock) and clearing its link field */
  i = p->firstblocked;
  p->firstblocked = NIL;

  while (i != NIL) {
    proc_table[i].shadow = j = i;
    i = lev->blocked[i];
    lev->blocked[j] = NIL;
  }
  p->nblocked = 0;

  /* reschedule: one of the woken tasks may now grab the mutex */
  scheduler();
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}
301
 
302
void PI_register_module(void)
303
{
304
  RLEVEL l;                  /* the level that we register */
305
  PI_mutex_resource_des *m;  /* for readableness only */
306
  PID i;                     /* a counter */
307
 
308
  printk("PI_register_module\n");
309
 
310
  /* request an entry in the level_table */
311
  l = resource_alloc_descriptor();
312
 
313
  /* alloc the space needed for the EDF_level_des */
314
  m = (PI_mutex_resource_des *)kern_alloc(sizeof(PI_mutex_resource_des));
315
 
316
  /* update the level_table with the new entry */
317
  resource_table[l] = (resource_des *)m;
318
 
319
  /* fill the resource_des descriptor */
320
  strncpy(m->m.r.res_name, PI_MODULENAME, MAX_MODULENAME);
321
  m->m.r.res_code                    = PI_MODULE_CODE;
322
  m->m.r.res_version                 = PI_MODULE_VERSION;
323
 
324
  m->m.r.rtype                       = MUTEX_RTYPE;
325
 
326
  m->m.r.resource_status             = PI_resource_status;
327
  m->m.r.level_accept_resource_model = PI_level_accept_resource_model;
328
  m->m.r.res_register                = PI_res_register;
329
 
330
  m->m.r.res_detach                  = PI_res_detach;
331
 
332
  /* fill the mutex_resource_des descriptor */
333
  m->m.level_accept_mutexattr        = PI_level_accept_mutexattr;
334
  m->m.init                          = PI_init;
335
  m->m.destroy                       = PI_destroy;
336
  m->m.lock                          = PI_lock;
337
  m->m.trylock                       = PI_trylock;
338
  m->m.unlock                        = PI_unlock;
339
 
340
  /* fille the PI_mutex_resource_des descriptor */
341
  for (i=0; i<MAX_PROC; i++) {
342
    m->nlocked[i] = 0;
343
    m->blocked[i] = NIL;
344
  }
345
}
346