/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors:
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
 *   Luca Abeni          <luca@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: pi.c,v 1.1 2005-02-25 10:55:09 pj Exp $

 File:        $File$
 Revision:    $Revision: 1.1 $
 Last update: $Date: 2005-02-25 10:55:09 $
 ------------

 Priority Inheritance protocol. See pi.h for more details...

**/

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */


#include <pi/pi/pi.h>

#include <ll/ll.h>
#include <arch/string.h>
#include <arch/stdio.h>
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>

#include <tracer.h>

/* The PI resource level descriptor */
typedef struct {
  mutex_resource_des m;   /*+ the mutex interface +*/

  int nlocked[MAX_PROC];  /*+ how many mutexes a task currently locks +*/

  PID blocked[MAX_PROC];  /*+ blocked queue ... +*/
} PI_mutex_resource_des;


/* this is the structure normally pointed to by the opt field in the
   mutex_t structure */
typedef struct {
  PID owner;
  int nblocked;
  PID firstblocked;
} PI_mutex_t;



#if 0
/*+ print resource protocol statistics...+*/
static void PI_resource_status(RLEVEL r)
{
  PI_mutex_resource_des *m = (PI_mutex_resource_des *)(resource_table[r]);
  PID i;

  kern_printf("Resources owned by the tasks:\n");
  for (i=0; i<MAX_PROC; i++) {
    kern_printf("%-4d", m->nlocked[i]);
  }
}
#endif
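
/* Illustrative sketch, not part of the original module: it shows how the
   per-level blocked[] array and the per-mutex firstblocked field encode the
   wait list as a singly linked chain of PIDs. The helper name
   PI_print_blocked_chain is hypothetical; like the statistics routine above,
   it is kept under #if 0. */
#if 0
static void PI_print_blocked_chain(PI_mutex_resource_des *lev, PI_mutex_t *p)
{
  PID i = p->firstblocked;   /* head of the chain of blocked tasks */

  kern_printf("PI blocked chain:");
  while (i != NIL) {
    kern_printf(" %d", i);
    i = lev->blocked[i];     /* each entry points to the next blocked PID */
  }
  kern_printf("\n");
}
#endif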

static int PI_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  /* priority inheritance works with all tasks without Resource parameters */
  return -1;
}

static void PI_res_detach(RLEVEL l, PID p)
{
  PI_mutex_resource_des *m = (PI_mutex_resource_des *)(resource_table[l]);

  if (m->nlocked[p])
    kern_raise(XMUTEX_OWNER_KILLED, p);
}

static int PI_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  PI_mutex_t *p;

  if (a->mclass != PI_MCLASS)
    return -1;

  p = (PI_mutex_t *) kern_alloc(sizeof(PI_mutex_t));

  /* check that there is enough memory; there is no check for init
     on a non-destroyed mutex */

  if (!p)
    return (ENOMEM);

  p->owner = NIL;
  p->nblocked = 0;
  p->firstblocked = NIL;

  m->mutexlevel = l;
  m->opt = (void *)p;

  return 0;
}


static int PI_destroy(RLEVEL l, mutex_t *m)
{
//  PI_mutex_resource_des *lev = (PI_mutex_resource_des *)(resource_table[l]);
  SYS_FLAGS f;

  if ( ((PI_mutex_t *)m->opt)->nblocked)
    return (EBUSY);

  f = kern_fsave();
  if (m->opt) {
    kern_free(m->opt,sizeof(PI_mutex_t));
    m->opt = NULL;
  }
  kern_frestore(f);

  return 0;
}

/* Note that in this approach, when unlocking we cannot wake up only
   one thread; we have to wake up all the blocked threads, because there
   is no concept of priority among the tasks... Each woken thread has
   to retest the condition.
   Normally, they retest it only once, because if many threads are
   unblocked, they are scheduled based on their priority (unknown in this
   module!)... and if the time slice is greater than the critical sections,
   they never block!
*/
static int PI_lock(RLEVEL l, mutex_t *m)
{
  PI_mutex_resource_des *lev = (PI_mutex_resource_des *)(resource_table[l]);
  PI_mutex_t *p;
  SYS_FLAGS f;
//  return 0;

  f = kern_fsave();

  p = (PI_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    PI_mutexattr_t a;
    PI_mutexattr_default(a);
    PI_init(l, m, &a);
    p = (PI_mutex_t *)m->opt;  /* reload p: PI_init() has just filled m->opt */
  }


  if (p->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_frestore(f);
    return (EDEADLK);
  }

  while (p->owner != NIL) {
    /* the mutex is locked by someone, "block" the task ...*/
    proc_table[exec_shadow].shadow = p->owner;
    lev->blocked[exec_shadow] = p->firstblocked;
    p->firstblocked = exec_shadow;
    p->nblocked++;
//    kern_printf("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<");
    /* ... call the scheduler... */
    scheduler();
    TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
    kern_context_load(proc_table[exec_shadow].context);

    /* ... and reacquire the cli() before the test... */
    kern_cli();
  }

  /* the mutex is free, we can lock it! */
  lev->nlocked[exec_shadow]++;

  p->owner = exec_shadow;

  kern_frestore(f);

  return 0;
}

static int PI_trylock(RLEVEL l, mutex_t *m)
{
  PI_mutex_t *p;
  SYS_FLAGS f;

  f = kern_fsave();

  p = (PI_mutex_t *)m->opt;
  if (!p) {
    /* if the mutex is not initialized, initialize it! */
    PI_mutexattr_t a;
    PI_mutexattr_default(a);
    PI_init(l, m, &a);
    p = (PI_mutex_t *)m->opt;  /* reload p: PI_init() has just filled m->opt */
  }

  if (p->owner != NIL) {
    /* a task already owns the mutex */
    kern_frestore(f);
    return (EBUSY);
  }
  else {
    /* the mutex is free */
    PI_mutex_resource_des *lev = (PI_mutex_resource_des *)(resource_table[l]);
    lev->nlocked[exec_shadow]++;

    p->owner = exec_shadow;

    kern_frestore(f);
    return 0;
  }
}

static int PI_unlock(RLEVEL l, mutex_t *m)
{
  PI_mutex_resource_des *lev;
  PI_mutex_t *p;
  int i, j;

//  return 0;
  p = (PI_mutex_t *)m->opt;
  if (!p)
    return (EINVAL);

  if (p->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    kern_sti();
    return (EPERM);
  }

  proc_table[exec_shadow].context = kern_context_save();

  /* the mutex is mine */
  lev = (PI_mutex_resource_des *)(resource_table[l]);
  lev->nlocked[exec_shadow]--;

  p->owner = NIL;

  /* we unblock all the waiting tasks... */
  i = p->firstblocked;
  p->firstblocked = NIL;

  while (i != NIL) {
//    kern_printf("<<%d>>", i);
    proc_table[i].shadow = j = i;
    i = lev->blocked[i];
    lev->blocked[j] = NIL;
  }
  p->nblocked = 0;

/*  {
    int xxx;
    kern_printf("(PI_unlock owner=%d ",p->owner);
    for (xxx = 0; xxx<5; xxx++) kern_printf("p%d s%d|",xxx, proc_table[xxx].shadow);
    kern_printf(")\n");
  }*/

  scheduler();
  TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}

RLEVEL PI_register_module(void)
{
  RLEVEL l;                  /* the level that we register */
  PI_mutex_resource_des *m;  /* for readability only       */
  PID i;                     /* a counter                  */

  printk("PI_register_module\n");

  /* request an entry in the level_table */
  l = resource_alloc_descriptor();

  /* alloc the space needed for the PI_mutex_resource_des */
  m = (PI_mutex_resource_des *)kern_alloc(sizeof(PI_mutex_resource_des));

  /* update the level_table with the new entry */
  resource_table[l] = (resource_des *)m;

  /* fill the resource_des descriptor */
  m->m.r.rtype = MUTEX_RTYPE;
  m->m.r.res_register = PI_res_register;
  m->m.r.res_detach = PI_res_detach;

  /* fill the mutex_resource_des descriptor */
  m->m.init = PI_init;
  m->m.destroy = PI_destroy;
  m->m.lock = PI_lock;
  m->m.trylock = PI_trylock;
  m->m.unlock = PI_unlock;

  /* fill the PI_mutex_resource_des descriptor */
  for (i=0; i<MAX_PROC; i++) {
    m->nlocked[i] = 0;
    m->blocked[i] = NIL;
  }

  return l;
}
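
/* Usage sketch (illustrative only, not part of the original file): it shows
   how this module is typically tied into a system, assuming the standard
   S.Ha.R.K. mutex interface (mutex_init/mutex_lock/mutex_unlock) and the
   PI_mutexattr_t / PI_mutexattr_default() names from pi.h. The function and
   variable names below are hypothetical, and the fragment is kept under
   #if 0 like the other inactive code in this file. */
#if 0
static mutex_t pi_mutex;

void pi_example_init(void)
{
  PI_mutexattr_t a;

  PI_register_module();        /* usually done once, at kernel registration time */

  PI_mutexattr_default(a);     /* attribute with mclass == PI_MCLASS */
  mutex_init(&pi_mutex, &a);   /* dispatched to PI_init() through the mutex interface */
}

void pi_example_task_body(void)
{
  mutex_lock(&pi_mutex);       /* may block; the owner inherits via the shadow mechanism */
  /* ... critical section ... */
  mutex_unlock(&pi_mutex);     /* wakes all blocked tasks, which retest the owner field */
}
#endif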