/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors:
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   Massimiliano Giorgi <massy@gandalf.sssup.it>
 *   Luca Abeni          <luca@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: conditio.c,v 1.2 2002-11-11 08:34:08 pj Exp $

 File:        $File$
 Revision:    $Revision: 1.2 $
 Last update: $Date: 2002-11-11 08:34:08 $
 ------------

 This file contains the condition variables handling functions.

 **/

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/model.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <errno.h>
#include <kernel/iqueue.h>

/*---------------------------------------------------------------------*/
/* Condition variables                                                  */
/*---------------------------------------------------------------------*/

static int condition_once = 1;

/* This is the test that is done when a task is being killed
   while it is waiting on a condition */
static int condition_cancellation_point(PID i, void *arg)
{
  LEVEL l;

  if (proc_table[i].status == WAIT_COND) {
    /* if the task is waiting on a condition variable, we have to extract it
       from the waiters queue, then set the KILL_REQUEST flag, and reinsert
       the task into the ready queue so it can reacquire the mutex and die */
    iq_extract(i, &proc_table[i].cond_waiting->waiters);
    if (iq_isempty(&proc_table[i].cond_waiting->waiters))
      proc_table[i].cond_waiting->used_for_waiting = NULL;
    proc_table[i].cond_waiting = NULL;

    l = proc_table[i].task_level;
    level_table[l]->task_insert(l, i);
    /* then, the kill_request flag is set, and when the task is rescheduled
       it kills itself... */

    return 1;
  }

  return 0;
}

int cond_init(cond_t *cond)
{
  /* first, if this is the first time that cond_init is called,
     register the cancellation point */
  if (condition_once) {
    condition_once = 0;
    register_cancellation_point(condition_cancellation_point, NULL);
  }

  iq_init(&cond->waiters, &freedesc, 0);

  cond->used_for_waiting = NULL;

  return 0;
}

int cond_destroy(cond_t *cond)
{
  if (!iq_isempty(&cond->waiters))
    return (EBUSY);

  return 0;
}

int cond_signal(cond_t *cond)
{
  LEVEL l;
  PID p;

  proc_table[exec_shadow].context = kern_context_save();

  /* if someone is waiting on the condition, wake up the first task
     in the waiters queue and reschedule */
  if (!iq_isempty(&cond->waiters)) {
    p = iq_getfirst(&cond->waiters);

    l = proc_table[p].task_level;
    level_table[l]->task_insert(l, p);

    scheduler();
  }

  kern_context_load(proc_table[exec_shadow].context);
  return 0;
}

int cond_broadcast(cond_t *cond)
{
  LEVEL l;
  PID p;

  proc_table[exec_shadow].context = kern_context_save();

  /* wake up all the tasks waiting on the condition, then reschedule */
  if (!iq_isempty(&cond->waiters)) {
    do {
      p = iq_getfirst(&cond->waiters);

      l = proc_table[p].task_level;
      level_table[l]->task_insert(l, p);
    } while (!iq_isempty(&cond->waiters));

    scheduler();
  }
  kern_context_load(proc_table[exec_shadow].context);
  return 0;
}

int cond_wait(cond_t *cond, mutex_t *mutex)
{
  LEVEL l;
  struct timespec ty;
  TIME tx;

  /* Why task_nopreempt?... because we have to unlock the mutex,
     and we can't call mutex_unlock after kern_context_save (the unlock
     could call context_save itself...) */
  task_nopreempt();

  /* First, cond_wait is a cancellation point... */
  task_testcancel();

  /* are all the tasks that currently use this condition waiting on the
     same mutex? */
  if (cond->used_for_waiting) {
    if (cond->used_for_waiting != mutex) {
      task_preempt();
      return (EINVAL);
    }
  }
  else
    cond->used_for_waiting = mutex;

  /* If the task is not canceled by testcancel, we block it now... */

  /* The mutex can't be destroyed while we are waiting on a condition,
     so we tell the mutex that a task is using it although it is not
     busy (cond_wait has to unlock the mutex!!!)... */
  mutex->use++;
  if (mutex_unlock(mutex)) {
    /* some problems unlocking the mutex... */
    mutex->use--;
    task_preempt();
    return (EINVAL);
  }

  /* now, we really block the task... */
  proc_table[exec_shadow].context = kern_context_save();

  /* SAME AS SCHEDULER... manage the capacity event and the load_info */
  ll_gettime(TIME_EXACT, &schedule_time);
  SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
  tx = TIMESPEC2USEC(&ty);
  proc_table[exec_shadow].avail_time -= tx;
  jet_update_slice(tx);
  if (cap_timer != NIL) {
    event_delete(cap_timer);
    cap_timer = NIL;
  }

  l = proc_table[exec_shadow].task_level;
  level_table[l]->task_extract(l, exec_shadow);

  /* we insert the task in the condition queue */
  proc_table[exec_shadow].status = WAIT_COND;
  iq_priority_insert(exec_shadow, &cond->waiters);

  /* then, we record in the process descriptor the condition on which
     the task is blocked... (if the task is killed while it is waiting
     on the condition, we have to remove it from the waiters queue, so
     we need the condition variable...) */
  proc_table[exec_shadow].cond_waiting = cond;

  /* and finally we reschedule */
  exec = exec_shadow = -1;
  scheduler();
  ll_context_to(proc_table[exec_shadow].context);
  kern_sti();

  /* when we arrive here:
     - the task did not die while it was waiting on the condition
       (normally, the cancelability state is set to deferred;
       if someone kills the task, it first has to lock the mutex,
       and then die.  Furthermore no cond_signal can be caught by a task
       that has to die, so in the makefree function we extract the
       task from the waiters queue)
     - the task is still in the non-preemptive state
     - the task has to reacquire the mutex to test the condition again
     - the task has to reset the cond_waiting pointer set before
  */
  if (proc_table[exec_shadow].cond_waiting != NULL) {
    proc_table[exec_shadow].cond_waiting = NULL;

    if (iq_isempty(&cond->waiters)) cond->used_for_waiting = NULL;
  }
  task_preempt();

  mutex_lock(mutex);
  mutex->use--;

  task_testcancel();

  return 0;
}
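
/*
  Usage sketch (illustrative only, not part of the kernel): the typical
  pattern for these primitives mirrors the POSIX one -- lock the mutex,
  test the predicate in a loop around cond_wait(), and signal under the
  same mutex.  The names my_mutex, my_cond, data_ready and the task
  bodies below are hypothetical, and the mutex and condition are assumed
  to have been initialized elsewhere (e.g. with mutex_init()/cond_init()).

    static mutex_t my_mutex;
    static cond_t  my_cond;
    static int     data_ready = 0;

    void consumer_body(void)
    {
      mutex_lock(&my_mutex);
      while (!data_ready)          // retest the predicate after each wakeup
        cond_wait(&my_cond, &my_mutex);
      data_ready = 0;              // consume the data
      mutex_unlock(&my_mutex);
    }

    void producer_body(void)
    {
      mutex_lock(&my_mutex);
      data_ready = 1;              // produce the data
      cond_signal(&my_cond);       // wake up one waiting consumer
      mutex_unlock(&my_mutex);
    }
*/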

/* If this event fires, the task passed as argument is blocked on a condition
   with a cond_timedwait.
   We have to:
   - extract the task from the waiters queue, because the task has to be
     woken up and must not consume any cond_signal
   - reset the delay timer...
   - call the task_insert...
*/
void condition_timer(void *arg)
{
  PID p = (PID)arg;
  LEVEL l;

  iq_extract(p, &proc_table[p].cond_waiting->waiters);
  if (iq_isempty(&proc_table[p].cond_waiting->waiters))
    proc_table[p].cond_waiting->used_for_waiting = NULL;
  proc_table[p].cond_waiting = NULL;

  proc_table[p].delay_timer = -1;

  l = proc_table[p].task_level;
  level_table[l]->task_insert(l, p);

  event_need_reschedule();
}

int cond_timedwait(cond_t *cond, mutex_t *mutex,
                   const struct timespec *abstime)
{
  LEVEL l;
  int returnvalue = 0;
  struct timespec ty;
  TIME tx;

  /* Why task_nopreempt?... because we have to unlock the mutex,
     and we can't call mutex_unlock after kern_context_save (the unlock
     could call context_save itself...) */
  task_nopreempt();

  /* First, cond_timedwait is a cancellation point... */
  task_testcancel();

  /* are all the tasks that currently use this condition waiting on the
     same mutex? */
  if (cond->used_for_waiting) {
    if (cond->used_for_waiting != mutex) {
      task_preempt();
      return (EINVAL);
    }
  }
  else
    cond->used_for_waiting = mutex;

  /* If the task is not canceled by testcancel, we block it now... */

  /* The mutex can't be destroyed while we are waiting on a condition,
     so we tell the mutex that a task is using it although it is not
     busy (cond_timedwait has to unlock the mutex!!!)... */
  mutex->use++;
  if (mutex_unlock(mutex)) {
    /* some problems unlocking the mutex... */
    mutex->use--;
    task_preempt();
    return (EINVAL);
  }

  /* now, we really block the task... */
  proc_table[exec_shadow].context = kern_context_save();

  /* SAME AS SCHEDULER... manage the capacity event and the load_info */
  ll_gettime(TIME_EXACT, &schedule_time);
  SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
  tx = TIMESPEC2USEC(&ty);
  proc_table[exec_shadow].avail_time -= tx;
  jet_update_slice(tx);
  if (cap_timer != NIL) {
    event_delete(cap_timer);
    cap_timer = NIL;
  }

  l = proc_table[exec_shadow].task_level;
  level_table[l]->task_extract(l, exec_shadow);

  /* we insert the task in the condition queue */
  proc_table[exec_shadow].status = WAIT_COND;
  iq_priority_insert(exec_shadow, &cond->waiters);

  /* then, we record in the process descriptor the condition on which
     the task is blocked... (if the task is killed while it is waiting
     on the condition, we have to remove it from the waiters queue, so
     we need the condition variable...) */
  proc_table[exec_shadow].cond_waiting = cond;

  /* we can use the delay timer because if we are here we are not in a
     task_delay */
  proc_table[exec_shadow].delay_timer =
    kern_event_post(abstime, condition_timer, (void *)exec_shadow);

  /* and finally we reschedule */
  exec = exec_shadow = -1;
  scheduler();
  ll_context_to(proc_table[exec_shadow].context);

  if (proc_table[exec_shadow].delay_timer != -1)
    event_delete(proc_table[exec_shadow].delay_timer);

  kern_sti();

  /* when we arrive here:
     - the task did not die while it was waiting on the condition
       (normally, the cancelability state is set to deferred;
       if someone kills the task, it first has to lock the mutex,
       and then die.  Furthermore no cond_signal can be caught by a task
       that has to die, so in the makefree function we extract the
       task from the waiters queue)
     - the task is still in the non-preemptive state
     - the task has to reacquire the mutex to test the condition again
     - the task has to reset the cond_waiting pointer set before
     Note that cond_timedwait has to be called with cancelability set to
     deferred... so we insert a testcancel after the mutex_lock...
  */
  if (proc_table[exec_shadow].cond_waiting != NULL) {
    proc_table[exec_shadow].cond_waiting = NULL;

    if (iq_isempty(&cond->waiters)) cond->used_for_waiting = NULL;
  }
  else
    /* cond_waiting == NULL if the task was killed or the timer has fired */
    returnvalue = ETIMEDOUT;

  task_preempt();

  mutex_lock(mutex);
  mutex->use--;

  task_testcancel();

  return returnvalue;
}
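
/*
  Usage sketch for cond_timedwait() (illustrative only, not part of the
  kernel): abstime is an absolute time, so the caller typically reads the
  current time and adds the desired relative timeout before waiting.
  The names my_mutex, my_cond, data_ready and the 1-second timeout are
  hypothetical; ll_gettime() is used only because it appears in this file,
  and ADDUSEC2TIMESPEC is assumed to be the usual S.Ha.R.K. macro that adds
  microseconds to a struct timespec (if it is not available, the timespec
  can be adjusted by hand).

    int wait_with_timeout(void)
    {
      struct timespec abstime;
      int err = 0;

      ll_gettime(TIME_EXACT, &abstime);        // current absolute time
      ADDUSEC2TIMESPEC(1000000, &abstime);     // + 1 second

      mutex_lock(&my_mutex);
      while (!data_ready && err != ETIMEDOUT)
        err = cond_timedwait(&my_cond, &my_mutex, &abstime);
      if (data_ready) {                        // the predicate wins over the timeout
        data_ready = 0;                        // consume the data
        err = 0;
      }
      mutex_unlock(&my_mutex);

      return err;                              // 0 or ETIMEDOUT
    }
*/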