Subversion Repositories shark

Rev

Rev 3 | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2 pj 1
/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors     :
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
 *   Luca Abeni          <luca@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: pi.c,v 1.2 2003-01-07 17:07:50 pj Exp $

 File:        $File$
 Revision:    $Revision: 1.2 $
 Last update: $Date: 2003-01-07 17:07:50 $
 ------------

 Priority Inheritance protocol. See pi.h for more details...

**/

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
52
 
53
 
54
#include <modules/pi.h>
55
 
56
#include <ll/ll.h>
57
#include <ll/string.h>
58
#include <ll/stdio.h>
59
#include <kernel/const.h>
60
#include <sys/types.h>
61
#include <kernel/descr.h>
62
#include <kernel/var.h>
63
#include <kernel/func.h>
64
 
65
/* The PI resource level descriptor */
66
/* The PI resource level descriptor: one instance per registered
   PI resource module (allocated in PI_register_module). */
typedef struct {
  mutex_resource_des m;   /*+ the mutex interface +*/

  int nlocked[MAX_PROC];  /*+ how many mutexes of this level each task
                              currently locks (see PI_lock/PI_unlock) +*/

  PID blocked[MAX_PROC];  /*+ blocked queue: blocked[p] is the task blocked
                              after p on the same mutex (NIL terminates the
                              chain; the head is PI_mutex_t.firstblocked) +*/
} PI_mutex_resource_des;
73
 
74
 
75
/* this is the structure normally pointed by the opt field in the
76
   mutex_t structure */
77
/* this is the structure normally pointed by the opt field in the
   mutex_t structure; it is allocated by PI_init and freed by PI_destroy */
typedef struct {
  PID owner;          /* task currently holding the mutex; NIL if free */
  int nblocked;       /* number of tasks blocked on this mutex */
  PID firstblocked;   /* head of the blocked-task chain (chained through
                         PI_mutex_resource_des.blocked[]) */
} PI_mutex_t;
82
 
83
 
84
 
38 pj 85
#if 0
2 pj 86
/*+ print resource protocol statistics...+*/
87
static void PI_resource_status(RLEVEL r)
88
{
89
  PI_mutex_resource_des *m = (PI_mutex_resource_des *)(resource_table[r]);
90
  PID i;
91
 
92
  kern_printf("Resources owned by the tasks:\n");
93
  for (i=0; i<MAX_PROC; i++) {
94
     kern_printf("%-4d", m->nlocked[i]);
95
  }
96
}
38 pj 97
#endif
2 pj 98
 
38 pj 99
/*+ RES_MODEL registration hook: PI needs no per-task resource
    parameters, so every resource model is refused (-1) +*/
static int PI_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  /* priority inheritance works with all tasks without Resource parameters */
  return -1;
}
104
 
105
/*+ called when task p detaches from this level (e.g. at task end):
    a task must not die while still holding a PI mutex, because the
    mutex could never be released — raise an exception in that case +*/
static void PI_res_detach(RLEVEL l, PID p)
{
  PI_mutex_resource_des *m = (PI_mutex_resource_des *)(resource_table[l]);

  /* nlocked[p] counts the mutexes of this level still held by p */
  if (m->nlocked[p])
    kern_raise(XMUTEX_OWNER_KILLED, p);
}
112
 
113
/*+ initialize a PI mutex: allocate and reset its PI_mutex_t descriptor
    and hook it into the generic mutex_t.
    Returns 0 on success, -1 on a wrong attribute class, ENOMEM when
    the descriptor cannot be allocated.
    Note: no check is done for re-initialization of a non-destroyed
    mutex. +*/
static int PI_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  PI_mutex_t *mp;

  /* only PI attributes are accepted by this level */
  if (a->mclass != PI_MCLASS)
    return -1;

  mp = (PI_mutex_t *) kern_alloc(sizeof(PI_mutex_t));
  if (!mp)
    return (ENOMEM);

  /* the mutex starts free, with nobody blocked on it */
  mp->owner        = NIL;
  mp->nblocked     = 0;
  mp->firstblocked = NIL;

  /* link the PI descriptor to the generic mutex */
  m->mutexlevel = l;
  m->opt        = (void *)mp;

  return 0;
}
137
 
138
 
139
/*+ destroy a PI mutex: free its PI_mutex_t descriptor.
    Returns EINVAL if the mutex was never initialized (or was already
    destroyed), EBUSY if tasks are still blocked on it, 0 on success. +*/
static int PI_destroy(RLEVEL l, mutex_t *m)
{
//  PI_mutex_resource_des *lev = (PI_mutex_resource_des *)(resource_table[l]);

  /* BUGFIX: the old code read ((PI_mutex_t *)m->opt)->nblocked BEFORE
     checking m->opt for NULL, crashing on an uninitialized or already
     destroyed mutex.  Check the pointer first. */
  if (!m->opt)
    return (EINVAL);

  if ( ((PI_mutex_t *)m->opt)->nblocked)
    return (EBUSY);

  kern_cli();
  /* re-test under cli(): the descriptor may have been freed meanwhile */
  if (m->opt) {
    kern_free(m->opt,sizeof(PI_mutex_t));
    m->opt = NULL;
  }
  kern_sti();

  return 0;
}
155
 
156
/* Note that in this approach, when unlocking we can't wake up only
157
   one thread, but we have to wake up all the blocked threads, because there
158
   is not a concept of priority between the task... Each woken thread have
159
   to retest he condition.
160
   Normally, they retest it only one time, because if many threads are
161
   unblocked, they are scheduled basing on their priority (unkown in this
162
   module!)... and if the slice is greather than the critical sections,
163
   they never block!
164
   */
165
static int PI_lock(RLEVEL l, mutex_t *m)
166
{
167
  PI_mutex_resource_des *lev = (PI_mutex_resource_des *)(resource_table[l]);
168
  PI_mutex_t *p;
169
//  return 0;
170
  kern_cli();
171
 
172
  p = (PI_mutex_t *)m->opt;
173
  if (!p) {
174
    /* if the mutex is not initialized, initialize it! */
175
    PI_mutexattr_t a;
176
    PI_mutexattr_default(a);
177
    PI_init(l, m, &a);
178
  }
179
 
180
 
181
  if (p->owner == exec_shadow) {
182
    /* the task already owns the mutex */
183
    kern_sti();
184
    return (EDEADLK);
185
  }
186
 
187
  while (p->owner != NIL) {
188
    /* the mutex is locked by someone, "block" the task ...*/
189
    proc_table[exec_shadow].shadow = p->owner;
190
    lev->blocked[exec_shadow] = p->firstblocked;
191
    p->firstblocked = exec_shadow;
192
    p->nblocked++;
193
//    kern_printf("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<");
194
    /* ... call the scheduler... */
195
    scheduler();
196
    kern_context_load(proc_table[exec_shadow].context);
197
 
198
    /* ... and reaquire the cli() before the test... */
199
    kern_cli();
200
  }
201
 
202
  /* the mutex is free, We can lock it! */
203
  lev->nlocked[exec_shadow]++;
204
 
205
  p->owner = exec_shadow;
206
 
207
  kern_sti();
208
 
209
  return 0;
210
}
211
 
212
/*+ try to lock a PI mutex without blocking: lazily initialize it if
    needed; returns EBUSY if it is already owned, 0 on success. +*/
static int PI_trylock(RLEVEL l, mutex_t *m)
{
  PI_mutex_t *p;

  kern_cli();

  p = (PI_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    PI_mutexattr_t a;
    PI_mutexattr_default(a);
    PI_init(l, m, &a);

    /* BUGFIX: reload the descriptor just created by PI_init; the old
       code kept using the stale NULL pointer and dereferenced it
       below.  If the initialization failed (no memory), give up. */
    p = (PI_mutex_t *)m->opt;
    if (!p) {
      kern_sti();
      return (ENOMEM);
    }
  }

  if (p->owner != NIL) {
    /* a task already owns the mutex */
    kern_sti();
    return (EBUSY);
  }
  else {
    /* the mutex is free: take it */
    PI_mutex_resource_des *lev = (PI_mutex_resource_des *)(resource_table[l]);
    lev->nlocked[exec_shadow]++;

    p->owner = exec_shadow;

    kern_sti();
    return 0;
  }
}
242
 
243
/*+ unlock a PI mutex: release ownership and wake up ALL the tasks
    blocked on it by resetting their shadow pointers (see the comment
    above PI_lock); each woken task retests the owner in PI_lock.
    Returns EINVAL on an uninitialized mutex, EPERM if the caller does
    not own it, 0 on success. +*/
static int PI_unlock(RLEVEL l, mutex_t *m)
{
  PI_mutex_resource_des *lev;
  PI_mutex_t *p;
  int i, j;
//  return 0;
  p = (PI_mutex_t *)m->opt;
  if (!p)
    /* the mutex was never initialized (or was destroyed) */
    return (EINVAL);

  if (p->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    /* NOTE(review): kern_sti() is called here although no kern_cli()
       appears earlier in this function — looks unbalanced; verify
       against the kernel's interrupt-state convention for unlock. */
    kern_sti();
    return (EPERM);
  }

  /* save the caller's context: scheduler()/kern_context_load below
     may switch to a woken task */
  proc_table[exec_shadow].context = kern_context_save();

  /* the mutex is mine */
  lev = (PI_mutex_resource_des *)(resource_table[l]);
  lev->nlocked[exec_shadow]--;

  p->owner = NIL;

  /* we unblock all the waiting tasks... */
  i = p->firstblocked;
  p->firstblocked = NIL;

  while (i != NIL) {
    /* walk the blocked chain: point each task's shadow back to itself
       and clear its link */
    proc_table[i].shadow = j = i;
    i = lev->blocked[i];
    lev->blocked[j] = NIL;
  }
  p->nblocked = 0;

/*  {
   int xxx;
   kern_printf("(PI_unlock owner=%d ",p->owner);
   for (xxx = 0; xxx<5; xxx++) kern_printf("p%d s%d|",xxx, proc_table[xxx].shadow);
   kern_printf(")\n");
  }*/

  /* reschedule: one of the woken tasks may preempt us */
  scheduler();
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}
291
 
38 pj 292
RLEVEL PI_register_module(void)
2 pj 293
{
294
  RLEVEL l;                  /* the level that we register */
295
  PI_mutex_resource_des *m;  /* for readableness only */
296
  PID i;                     /* a counter */
297
 
298
  printk("PI_register_module\n");
299
 
300
  /* request an entry in the level_table */
301
  l = resource_alloc_descriptor();
302
 
303
  /* alloc the space needed for the EDF_level_des */
304
  m = (PI_mutex_resource_des *)kern_alloc(sizeof(PI_mutex_resource_des));
305
 
306
  /* update the level_table with the new entry */
307
  resource_table[l] = (resource_des *)m;
308
 
309
  /* fill the resource_des descriptor */
310
  m->m.r.rtype                       = MUTEX_RTYPE;
311
  m->m.r.res_register                = PI_res_register;
312
  m->m.r.res_detach                  = PI_res_detach;
313
 
314
  /* fill the mutex_resource_des descriptor */
315
  m->m.init                          = PI_init;
316
  m->m.destroy                       = PI_destroy;
317
  m->m.lock                          = PI_lock;
318
  m->m.trylock                       = PI_trylock;
319
  m->m.unlock                        = PI_unlock;
320
 
321
  /* fille the PI_mutex_resource_des descriptor */
322
  for (i=0; i<MAX_PROC; i++) {
323
    m->nlocked[i] = 0;
324
    m->blocked[i] = NIL;
325
  }
38 pj 326
 
327
  return l;
2 pj 328
}
329