Details | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
1670 | pj | 1 | #include "hlp.h" |
2 | |||
3 | #include <ll/ll.h> |
||
4 | #include <ll/string.h> |
||
5 | #include <ll/stdio.h> |
||
6 | #include <kernel/const.h> |
||
7 | #include <sys/types.h> |
||
8 | #include <kernel/descr.h> |
||
9 | #include <kernel/var.h> |
||
10 | #include <kernel/func.h> |
||
11 | |||
12 | #include <tracer.h> |
||
13 | |||
14 | /* Just for debugging */ |
||
15 | static void print_shadow(char *label, HLP_mutex_resource_des *l) |
||
16 | { |
||
17 | HLP_tasklist_t *x = NULL; |
||
18 | |||
19 | kern_printf("HLP module: %s:( ", label); |
||
20 | for (x = l->tasklist; x != NULL; x = x->next) |
||
21 | { |
||
22 | kern_printf("(%d, %d) ", x->pid, proc_table[x->pid].shadow); |
||
23 | } |
||
24 | kern_printf(" )\n"); |
||
25 | } |
||
26 | |||
27 | /* ----------------------------------------------------------------------- |
||
28 | LISTS HANDLING |
||
29 | ----------------------------------------------------------------------- */ |
||
30 | |||
31 | /* insert the task in the list ordered by preemption level */ |
||
32 | static void HLP_insert_tasklist(HLP_tasklist_t **list, PID pid, DWORD preempt) |
||
33 | { |
||
34 | HLP_tasklist_t *tmp = NULL, *scan = NULL, *prev = NULL; |
||
35 | |||
36 | tmp = (HLP_tasklist_t *)kern_alloc(sizeof(HLP_tasklist_t)); |
||
37 | tmp->pid = pid; |
||
38 | tmp->preempt = preempt; |
||
39 | tmp->prev = NULL; |
||
40 | tmp->next = NULL; |
||
41 | |||
42 | scan = *list; |
||
43 | |||
44 | while (scan && scan->preempt > preempt) |
||
45 | { |
||
46 | prev = scan; |
||
47 | scan = scan->next; |
||
48 | } |
||
49 | |||
50 | if (prev == NULL) /* empty lista */ |
||
51 | { |
||
52 | *list = tmp; |
||
53 | tmp->next = NULL; |
||
54 | tmp->prev = prev; |
||
55 | } |
||
56 | else |
||
57 | if (scan == NULL) /* last position */ |
||
58 | { |
||
59 | tmp->next = NULL; |
||
60 | tmp->prev = prev; |
||
61 | prev->next = tmp; |
||
62 | } |
||
63 | else |
||
64 | { |
||
65 | tmp->next = scan->next; |
||
66 | scan->next->prev = tmp; |
||
67 | scan->next = tmp; |
||
68 | } |
||
69 | } |
||
70 | |||
71 | static void HLP_extract_tasklist(HLP_tasklist_t **list, PID pid) |
||
72 | { |
||
73 | HLP_tasklist_t *scan = *list; |
||
74 | |||
75 | while (scan && scan->pid != pid) |
||
76 | scan = scan->next; |
||
77 | |||
78 | if (scan != NULL) |
||
79 | { |
||
80 | scan->prev->next = scan->next; |
||
81 | scan->next->prev = scan->prev; |
||
82 | kern_free(scan, (sizeof(HLP_tasklist_t))); |
||
83 | } |
||
84 | } |
||
85 | |||
86 | HLP_tasklist_t *HLP_tasklist_with_pid(HLP_tasklist_t *list, PID p) |
||
87 | { |
||
88 | HLP_tasklist_t *ret = list; |
||
89 | |||
90 | while (ret && ret->pid !=p) |
||
91 | ret = ret->next; |
||
92 | |||
93 | return ret; |
||
94 | } |
||
95 | |||
96 | /* ----------------------------------------------------------------------- |
||
97 | End of LISTS HANDLING |
||
98 | ----------------------------------------------------------------------- */ |
||
99 | |||
/**
 * HLP_res_register
 *
 * Called when a task_create is made and when HLP_usemutex is used.
 *
 * Two resource-model classes are accepted:
 *  - HLP_RCLASS:  r describes a task; the task is inserted (ordered
 *                 by preemption level) into the level-wide task list;
 *  - HLP2_RCLASS: r is an HLP_mutex_t passed via HLP_useres(); the
 *                 mutex is pushed on the task's mutex list and the
 *                 task inserted into the mutex's task list.
 *
 * Returns 0 on success, -1 on a level mismatch, an unknown class,
 * or (HLP2_RCLASS) a task that was never registered with HLP_RCLASS.
 */

static int HLP_res_register(RLEVEL l, PID p, RES_MODEL *r)
{
  HLP_mutex_resource_des *m = (HLP_mutex_resource_des *)(resource_table[l]);
  HLP_tasklist_t *tasklist = NULL;

  /* a nonzero level in the model must match this level */
  if (r->level && r->level !=l)
    return -1;

  if (r->rclass == HLP_RCLASS) /* register task */
  {
    HLP_RES_MODEL *hlp = (HLP_RES_MODEL *)r;

    HLP_insert_tasklist(&(m->tasklist), p, hlp->preempt); /* insert into HLP system task */

    return 0;
  }
  else if (r->rclass == HLP2_RCLASS) /* a mutex passed via HLP_useres() */
  {
    HLP_mutex_t *mut = (HLP_mutex_t *)r;

    /* register mutex for task p */
    tasklist = HLP_tasklist_with_pid(m->tasklist, p);

    /* task p was never registered with HLP_RCLASS */
    if (tasklist == NULL)
      return -1;

    /* push the mutex on the head of the task's mutex list */
    mut->next = tasklist->mutexlist;
    tasklist->mutexlist = mut;

    /* register task for mutex */
    HLP_insert_tasklist(&(mut->tasklist), tasklist->pid, tasklist->preempt);

    kern_printf("HLP module: process %d uses mutex %p\n", tasklist->pid, mut);
    return 0;
  }
  else
    return -1;
}
||
144 | |||
145 | |||
146 | /* called when task_kill is made */ |
||
147 | static void HLP_res_detach(RLEVEL l, PID p) |
||
148 | { |
||
149 | HLP_mutex_resource_des *m = (HLP_mutex_resource_des *)(resource_table[l]); |
||
150 | HLP_mutex_t *mut; |
||
151 | HLP_tasklist_t *cur; |
||
152 | |||
153 | cur = HLP_tasklist_with_pid(m->tasklist, p); |
||
154 | |||
155 | for (mut = cur->mutexlist; mut; mut = mut->next) |
||
156 | if (mut->owner == p) |
||
157 | kern_raise(XMUTEX_OWNER_KILLED, p); |
||
158 | |||
159 | for (mut = m->mutexlist; mut; mut = mut->next) |
||
160 | HLP_extract_tasklist(&(mut->tasklist), p); |
||
161 | |||
162 | /* remove the task from the tasklist */ |
||
163 | HLP_extract_tasklist(&(m->tasklist), p); |
||
164 | } |
||
165 | |||
/* called when a mutex_init on a HLP is made:
   allocates the HLP_mutex_t, initializes it as free (owner == NIL,
   no declared users), links it into the level's mutex list and hooks
   it to the generic mutex_t via m->opt.
   Returns -1 on a wrong attribute class, ENOMEM on allocation
   failure, 0 on success. */
static int HLP_init(RLEVEL l, mutex_t *m, const mutexattr_t *a)
{
  HLP_mutex_resource_des *lev = (HLP_mutex_resource_des *)(resource_table[l]);
  HLP_mutex_t *mut;


  /* only HLP mutex attributes are accepted */
  if (a->mclass != HLP_MCLASS)
    return -1;

  mut = (HLP_mutex_t *) kern_alloc(sizeof(HLP_mutex_t));

  /* control if there is enough memory; no control on init on a
     non- destroyed mutex */

  if (!mut)
    return (ENOMEM);

  /* initialize the embedded resource model so the mutex itself can be
     passed to HLP_res_register with class HLP2_RCLASS */
  res_default_model(mut->r, HLP2_RCLASS);

  mut->owner = NIL;            /* free: nobody holds it */
  mut->tasklist = NULL;        /* no task has declared use of it yet */
  mut->next = lev->mutexlist;  /* push on the level's mutex list */

  lev->mutexlist = mut;

  m->mutexlevel = l;
  m->opt = (void *)mut;

  /* NOTE(review): the list insertion above is not protected by
     kern_fsave/kern_frestore — confirm mutex_init cannot race with
     other HLP operations here */

  return 0;
}
||
197 | |||
198 | static int HLP_destroy(RLEVEL l, mutex_t *m) |
||
199 | { |
||
200 | HLP_mutex_resource_des *lev = (HLP_mutex_resource_des *)(resource_table[l]); |
||
201 | HLP_mutex_t *mut; |
||
202 | SYS_FLAGS f; |
||
203 | |||
204 | f = kern_fsave(); |
||
205 | |||
206 | mut = m->opt; |
||
207 | |||
208 | if (mut->owner != NIL) |
||
209 | return (EBUSY); |
||
210 | |||
211 | /* extracting from the list of system mutexes */ |
||
212 | lev = lev; |
||
213 | // HLP_extract_hlplist(lev, mut); |
||
214 | |||
215 | if (m->opt) { |
||
216 | kern_free(m->opt,sizeof(HLP_mutex_t)); |
||
217 | m->opt = NULL; |
||
218 | } |
||
219 | kern_frestore(f); |
||
220 | |||
221 | return 0; |
||
222 | } |
||
223 | |||
/* mutex_lock.
   Under HLP a lock request never blocks: the protocol guarantees that
   a task that declared the mutex (via HLP_useres) only runs when the
   mutex is free.  Locking a busy mutex, or a mutex the task never
   declared, therefore raises XHLP_INVALID_LOCK.
   On success, the shadow of every task with a higher preemption level
   in the mutex's task list is redirected to the caller, so they
   cannot preempt it while the lock is held. */
static int HLP_lock(RLEVEL l, mutex_t *m)
{
  HLP_mutex_resource_des *lev = (HLP_mutex_resource_des *)(resource_table[l]);
  HLP_mutex_t *mut;
  HLP_tasklist_t *current, *taskscan;

  SYS_FLAGS f;

  f = kern_fsave();

  mut = (HLP_mutex_t *)m->opt;

  if (!mut) {
    /* if the mutex is not initialized */
    kern_frestore(f);
    return (EINVAL);
  }

  if (mut->owner == exec_shadow) {
    /* the task already owns the mutex */
    kern_frestore(f);
    return (EDEADLK);
  }

  current = HLP_tasklist_with_pid(mut->tasklist, exec_shadow);

  if (current == NULL || /* if the task does not use this mutex */
      mut->owner != NIL) /* or the mutex is already locked */
  {
    kern_raise(XHLP_INVALID_LOCK, exec_shadow);
    kern_frestore(f);
    return (EINVAL);
  }

  /* we know that:
     - the task and the mutex that it wants to lock
     - the mutex is free
     => the task can lock now the mutex
  */

  mut->owner = exec_shadow;

  print_shadow("prelock", lev);

  /* shadow of tasks using the same mutexes, have to point to exec_shadow */

  /* the task list is kept ordered by preemption level by
     HLP_insert_tasklist, so walking current->prev visits the tasks
     with higher preemption level */
  for (taskscan = current->prev; taskscan != NULL; taskscan = taskscan->prev)
    proc_table[taskscan->pid].shadow = exec_shadow;

  print_shadow("postlock", lev);

  kern_frestore(f);

  return 0;
}
||
280 | |||
/* HLP_trylock is equal to HLP_lock because the HLP_lock don't block !!! */

/* mutex_unlock.
   Releases the mutex and recomputes the shadow of every task whose
   shadow was redirected to the caller by HLP_lock: each such task
   either returns to pointing at itself, or is re-redirected to the
   owner of another still-locked mutex it shares with a lower (or
   equal) preemption-level owner.  Finally reschedules and reloads
   the context of the (possibly new) running task. */
static int HLP_unlock(RLEVEL l, mutex_t *m)
{
  HLP_mutex_resource_des *lev;
  HLP_mutex_t *mut, *mutscan;
  HLP_tasklist_t *current, *taskscan, *taskowner;

  lev = (HLP_mutex_resource_des *)(resource_table[l]);
  mut = (HLP_mutex_t *)m->opt;

  if (!mut)
  {
    /* NOTE(review): the error paths use kern_sti() while HLP_lock
       uses kern_fsave/kern_frestore — presumably unlock is entered
       with interrupts disabled by the generic mutex layer; confirm */
    kern_sti();
    return (EINVAL);
  }

  if (mut->owner != exec_shadow) {
    /* the mutex is owned by another task!!! */
    kern_sti();
    return (EPERM);
  }

  proc_table[exec_shadow].context = kern_context_save();

  mut->owner = NIL;

  /* recalculate shadow for tasks with preemption level upper
     than tasks with shadow pointing to current task */

  /* NOTE(review): current is not checked for NULL here — this relies
     on the caller having been registered in lev->tasklist; confirm */
  current = HLP_tasklist_with_pid(lev->tasklist, exec_shadow);

  print_shadow("preunlock", lev);

  /* for on tasks with upper preemption level (list is ordered by
     preemption level, so we walk backwards from current) */
  for (taskscan = current->prev; taskscan; taskscan = taskscan->prev)
  {
    /* task affected by hlp. Need to recalc the shadow */
    if (proc_table[taskscan->pid].shadow == exec_shadow &&
        taskscan->pid != current->pid)
    {
      /* default: the task points back at itself... */
      proc_table[taskscan->pid].shadow = taskscan->pid;
      /* ...unless one of its declared mutexes is still locked by an
         owner whose preemption level blocks it */
      for (mutscan = taskscan->mutexlist; mutscan; mutscan = mutscan->next)
      {
        if (mutscan->owner != NIL)
        {
          taskowner = HLP_tasklist_with_pid(mutscan->tasklist, mutscan->owner);
          if (taskowner->preempt <= taskscan->preempt)
            proc_table[taskscan->pid].shadow = mutscan->owner;
        }
      }
    }
  }

  print_shadow("postunlock", lev);

  scheduler();
  TRACER_LOGEVENT(FTrace_EVT_inheritance,(unsigned short int)proc_table[exec_shadow].context,(unsigned int)proc_table[exec].context);
  kern_context_load(proc_table[exec_shadow].context);

  return 0;
}
||
343 | |||
344 | RLEVEL HLP_register_module(void) |
||
345 | { |
||
346 | RLEVEL l; /* the level that we register */ |
||
347 | HLP_mutex_resource_des *m; /* for readableness only */ |
||
348 | |||
349 | printk("HLP_register_module\n"); |
||
350 | |||
351 | /* request an entry in the level_table */ |
||
352 | l = resource_alloc_descriptor(); |
||
353 | |||
354 | /* alloc the space needed for the EDF_level_des */ |
||
355 | m = (HLP_mutex_resource_des *)kern_alloc(sizeof(HLP_mutex_resource_des)); |
||
356 | |||
357 | /* update the level_table with the new entry */ |
||
358 | resource_table[l] = (resource_des *)m; |
||
359 | |||
360 | /* fill the resource_des descriptor */ |
||
361 | m->m.r.rtype = MUTEX_RTYPE; |
||
362 | m->m.r.res_register = HLP_res_register; |
||
363 | m->m.r.res_detach = HLP_res_detach; |
||
364 | |||
365 | /* fill the mutex_resource_des descriptor */ |
||
366 | m->m.init = HLP_init; |
||
367 | m->m.destroy = HLP_destroy; |
||
368 | m->m.lock = HLP_lock; |
||
369 | m->m.trylock = HLP_lock; /* equal!!! */ |
||
370 | m->m.unlock = HLP_unlock; |
||
371 | |||
372 | /* fill the HLP_mutex_resource_des descriptor */ |
||
373 | m->tasklist = NULL; |
||
374 | m->mutexlist = NULL; |
||
375 | |||
376 | return l; |
||
377 | } |