Rev | Author | Line No. | Line |
---|---|---|---|
2 | pj | 1 | /* |
2 | * Project: S.Ha.R.K. |
||
3 | * |
||
4 | * Coordinators: |
||
5 | * Giorgio Buttazzo <giorgio@sssup.it> |
||
6 | * Paolo Gai <pj@gandalf.sssup.it> |
||
7 | * |
||
8 | * Authors : |
||
9 | * Paolo Gai <pj@gandalf.sssup.it> |
||
10 | * (see the web pages for full authors list) |
||
11 | * |
||
12 | * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy) |
||
13 | * |
||
14 | * http://www.sssup.it |
||
15 | * http://retis.sssup.it |
||
16 | * http://shark.sssup.it |
||
17 | */ |
||
18 | |||
19 | /** |
||
20 | ------------ |
||
29 | pj | 21 | CVS : $Id: signal.c,v 1.2 2002-11-11 08:34:09 pj Exp $ |
2 | pj | 22 | |
23 | File: $File$ |
||
29 | pj | 24 | Revision: $Revision: 1.2 $ |
25 | Last update: $Date: 2002-11-11 08:34:09 $ |
||
2 | pj | 26 | ------------ |
27 | |||
28 | This file contains: |
||
29 | |||
30 | Signal Handling |
||
31 | |||
32 | - Data structures |
||
33 | - sigset_t handling functions |
||
34 | |||
35 | **/ |
||
36 | |||
37 | /* |
||
38 | * Copyright (C) 2000 Paolo Gai |
||
39 | * |
||
40 | * This program is free software; you can redistribute it and/or modify |
||
41 | * it under the terms of the GNU General Public License as published by |
||
42 | * the Free Software Foundation; either version 2 of the License, or |
||
43 | * (at your option) any later version. |
||
44 | * |
||
45 | * This program is distributed in the hope that it will be useful, |
||
46 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
||
47 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
||
48 | * GNU General Public License for more details. |
||
49 | * |
||
50 | * You should have received a copy of the GNU General Public License |
||
51 | * along with this program; if not, write to the Free Software |
||
52 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
||
53 | * |
||
54 | */ |
||
55 | |||
56 | /* |
||
57 | * some functions are inspired by the implementation of the OSKit... |
||
58 | * |
||
59 | * Copyright (c) 1997, 1998, 1999 University of Utah and the Flux Group. |
||
60 | * All rights reserved. |
||
61 | * |
||
62 | * [...] The OSKit is free software, also known |
||
63 | * as "open source;" you can redistribute it and/or modify it under the terms |
||
64 | * of the GNU General Public License (GPL), version 2, as published by the Free |
||
65 | * Software Foundation (FSF). To explore alternate licensing terms, contact |
||
66 | * the University of Utah at csl-dist@cs.utah.edu or +1-801-585-3271. |
||
67 | * |
||
68 | * The OSKit is distributed in the hope that it will be useful, but WITHOUT ANY |
||
69 | * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS |
||
70 | * FOR A PARTICULAR PURPOSE. See the GPL for more details. You should have |
||
71 | * received a copy of the GPL along with the OSKit; see the file COPYING. If |
||
72 | * not, write to the FSF, 59 Temple Place #330, Boston, MA 02111-1307, USA. |
||
73 | */ |
||
74 | |||
75 | |||
76 | |||
77 | #include <ll/ll.h> |
||
78 | #include <ll/stdlib.h> |
||
79 | #include <ll/stdio.h> |
||
80 | #include <ll/i386/pic.h> |
||
81 | #include <signal.h> |
||
82 | #include <errno.h> |
||
83 | #include <kernel/descr.h> |
||
84 | #include <kernel/var.h> |
||
85 | #include <kernel/func.h> |
||
86 | #include <kernel/trace.h> |
||
87 | |||
88 | /* look at nanoslp.c */ |
||
89 | int nanosleep_interrupted_by_signal(PID i); |
||
90 | |||
91 | |||
92 | /*---------------------------------------------------------------------*/ |
||
93 | /* Data structures */ |
||
94 | /*---------------------------------------------------------------------*/ |
||
95 | |||
96 | /*+ A flag, see kern_raise +*/ |
||
97 | static int active_exc = 0; |
||
98 | |||
99 | /*+ The signal table... +*/ |
||
100 | static struct sigaction sigactions[SIG_MAX]; |
||
101 | |||
102 | /*+ There is a global (or "process") set of pending signals. |
||
103 | kill() and sigqueue() affect the process pending set. |
||
104 | +*/ |
||
105 | static sigset_t procsigpending; |
||
106 | |||
107 | /* |
||
108 | * A queue of all threads waiting in sigwait. |
||
109 | * It is not static because it is also used in task_kill... |
||
110 | */ |
||
29 | pj | 111 | static IQUEUE sigwaiters; |
2 | pj | 112 | |
113 | |||
114 | /*+ An array of queues of pending signals posted with sigqueue(). +*/ |
||
115 | static SIGQ sigqueued[SIG_MAX]; |
||
116 | |||
117 | /*+ We avoid malloc in interrupt handlers by preallocating the queue |
||
118 | entries for sigqueued above. |
||
119 | It is also used in kernel/time.c +*/ |
||
120 | SIGQ sigqueue_free; |
||
121 | |||
122 | /*+ this is the signal queue... +*/ |
||
123 | sig_queue_entry sig_queue[SIGQUEUE_MAX]; |
||
124 | |||
125 | /*+ alarm stuff +*/ |
||
126 | static struct timespec alarm_time; |
||
127 | static int alarm_timer; |
||
128 | |||
129 | |||
130 | /* returns the index of the lowest set bit (0 if value is 0)... */ |
||
131 | static int ffs(int value) |
||
132 | { |
||
133 | int x; |
||
134 | |||
135 | for (x=0; value; x++, value = value>>1) |
||
136 | if (value & 1) |
||
137 | return x; |
||
138 | return 0; |
||
139 | } |
||
140 | |||
141 | /*---------------------------------------------------------------------*/ |
||
142 | /* interruptable function registration... */ |
||
143 | /*---------------------------------------------------------------------*/ |
||
144 | |||
145 | |||
146 | /*+ this structure contains the functions to be called to test if a |
||
147 | task is blocked on a cancellation point +*/ |
||
148 | static struct { |
||
149 | int (*test)(PID p, void *arg); |
||
150 | void *arg; |
||
151 | } interruptable_table[MAX_SIGINTPOINTS]; |
||
152 | |||
153 | static int interruptable_points = 0; |
||
154 | |||
155 | |||
156 | /*+ This function registers a cancellation point in the system. |
||
157 | Be careful!!! No checks are performed... +*/ |
||
158 | void register_interruptable_point(int (*func)(PID p, void *arg), void *arg) |
||
159 | { |
||
160 | interruptable_table[interruptable_points].test = func; |
||
161 | interruptable_table[interruptable_points].arg = arg; |
||
162 | interruptable_points++; |
||
163 | } |
||
164 | |||
165 | static void test_interruptable_points(PID i) |
||
166 | { |
||
167 | int j; |
||
168 | |||
169 | /* check if the task is blocked on a cancellation point */ |
||
170 | for (j=0; j<interruptable_points; j++) |
||
171 | if (interruptable_table[j].test(i,interruptable_table[j].arg)) |
||
172 | break; |
||
173 | } |
||
174 | |||
175 | |||
176 | /*---------------------------------------------------------------------*/ |
||
177 | /* sigset_t handling functions */ |
||
178 | /*---------------------------------------------------------------------*/ |
||
179 | |||
180 | /* These functions will soon become macros... */ |
||
181 | int sigemptyset(sigset_t *set) |
||
182 | { |
||
183 | *set = 0; |
||
184 | |||
185 | return 0; |
||
186 | } |
||
187 | |||
188 | int sigfillset(sigset_t *set) |
||
189 | { |
||
190 | *set=0xFFFFFFFFUL; |
||
191 | |||
192 | return 0; |
||
193 | } |
||
194 | |||
195 | int sigaddset(sigset_t *set, int signo) |
||
196 | { |
||
197 | if (signo < 0 || signo >= SIG_MAX) |
||
198 | { |
||
199 | errno = EINVAL; |
||
200 | return -1; |
||
201 | } |
||
202 | |||
203 | *set |= 1 << signo; |
||
204 | return 0; |
||
205 | } |
||
206 | |||
207 | |||
208 | int sigdelset(sigset_t *set, int signo) |
||
209 | { |
||
210 | if (signo < 0 || signo >= SIG_MAX) |
||
211 | { |
||
212 | errno = EINVAL; |
||
213 | return -1; |
||
214 | } |
||
215 | |||
216 | *set &= ~(1 << signo); |
||
217 | return 0; |
||
218 | } |
||
219 | |||
220 | int sigismember(const sigset_t *set, int signo) |
||
221 | { |
||
222 | if (signo < 0 || signo >= SIG_MAX) |
||
223 | { |
||
224 | errno = EINVAL; |
||
225 | return -1; |
||
226 | } |
||
227 | |||
228 | return *set & (1 << signo ); |
||
229 | } |
||
230 | |||
231 | |||
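As a quick illustration of the helpers above, here is a minimal usage sketch; it assumes only the prototypes from `<signal.h>` and uses SIGUSR1 as an example signal (SIGUSR1 is not referenced elsewhere in this file).

```c
#include <signal.h>

/* Build a set, query it, shrink it again.
   SIGUSR1 is assumed to be defined in <signal.h>. */
void sigset_demo(void)
{
  sigset_t set;
  int member;

  sigemptyset(&set);           /* start from the empty set            */
  sigaddset(&set, SIGALRM);    /* add two signals                     */
  sigaddset(&set, SIGUSR1);

  member = sigismember(&set, SIGALRM);   /* nonzero if present        */
  (void)member;

  sigdelset(&set, SIGUSR1);    /* remove one of them again            */
}
```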
232 | /*---------------------------------------------------------------------*/ |
||
233 | /* Finally, the public functions */ |
||
234 | /*---------------------------------------------------------------------*/ |
||
235 | |||
236 | /* |
||
237 | * Prototypes. |
||
238 | */ |
||
239 | void really_deliver_signal(int sig, siginfo_t *code); |
||
240 | void kern_deliver_async_signal(int sig); |
||
241 | void kern_deliver_process_signal(int sig); |
||
242 | |||
243 | int task_sigmask(int how, const sigset_t *set, sigset_t *oset) |
||
244 | { |
||
245 | proc_des *task; /* current executing task... */ |
||
246 | int err = 0; |
||
247 | |||
248 | kern_cli(); |
||
249 | |||
250 | task = &proc_table[exec_shadow]; |
||
251 | |||
252 | if (oset) |
||
253 | *oset = task->sigmask; |
||
254 | |||
255 | if (set) { |
||
256 | switch (how) { |
||
257 | case SIG_BLOCK: |
||
258 | task->sigmask |= *set; |
||
259 | break; |
||
260 | case SIG_UNBLOCK: |
||
261 | task->sigmask &= ~*set; |
||
262 | break; |
||
263 | case SIG_SETMASK: |
||
264 | task->sigmask = *set; |
||
265 | break; |
||
266 | default: |
||
267 | err = EINVAL; |
||
268 | } |
||
269 | } |
||
270 | |||
271 | /* |
||
272 | * Look for process pending signals that are unblocked, and deliver. |
||
273 | */ |
||
274 | while (procsigpending & ~task->sigmask) { |
||
275 | int sig = ffs(procsigpending & ~task->sigmask); |
||
276 | kern_deliver_process_signal(sig); |
||
277 | } |
||
278 | |||
279 | /* |
||
280 | * Look for task pending signals that are unblocked, and deliver. |
||
281 | */ |
||
282 | while (task->sigpending & ~task->sigmask) { |
||
283 | int sig = ffs(task->sigpending & ~task->sigmask); |
||
284 | kern_deliver_async_signal(sig); |
||
285 | } |
||
286 | |||
287 | kern_sti(); |
||
288 | return err; |
||
289 | } |
||
290 | |||
291 | /* |
||
292 | * This can be called out of an interrupt handler, say from an alarm |
||
293 | * expiration. |
||
294 | */ |
||
295 | int |
||
296 | task_signal(PID p, int signo) |
||
297 | { |
||
298 | // int enabled; |
||
299 | |||
300 | /* Error check? Sure! */ |
||
301 | if (!signo) |
||
302 | return 0; |
||
303 | |||
304 | if (signo < 0 || signo >= SIG_MAX) |
||
305 | return EINVAL; |
||
306 | |||
307 | if (proc_table[p].status == FREE) |
||
308 | return EINVAL; |
||
309 | |||
310 | kern_cli(); |
||
311 | |||
312 | /* |
||
313 | * Look at the process sigactions. If the "process" is ignoring |
||
314 | * the signal, then the signal is not placed in the pending list. |
||
315 | */ |
||
316 | if (!(sigactions[signo].sa_flags & SA_SIGINFO) && |
||
317 | sigactions[signo].sa_handler == SIG_IGN) { |
||
318 | kern_sti(); |
||
319 | return 0; |
||
320 | } |
||
321 | |||
322 | /* |
||
323 | * Add the signal to list of pending signals for the target task. |
||
324 | */ |
||
325 | sigaddset(&proc_table[p].sigpending, signo); |
||
326 | |||
327 | /* check for an interruptable function!!! */ |
||
328 | test_interruptable_points(p); |
||
329 | |||
330 | if (proc_table[p].status == WAIT_SIGSUSPEND) { |
||
331 | LEVEL l; |
||
332 | |||
333 | /* Reactivate the task... */ |
||
29 | pj | 334 | iq_extract(p, &sigwaiters); |
2 | pj | 335 | |
336 | l = proc_table[p].task_level; |
||
337 | level_table[l]->task_insert(l,p); |
||
338 | |||
339 | } |
||
340 | |||
341 | |||
342 | /* |
||
343 | * If not in an interrupt, use this opportunity to deliver |
||
344 | * pending unblocked signals to the current thread. |
||
345 | */ |
||
346 | if (!ll_ActiveInt()) { |
||
347 | kern_deliver_pending_signals(); |
||
348 | } |
||
349 | |||
350 | kern_sti(); |
||
351 | return 0; |
||
352 | } |
||
353 | |||
354 | /* |
||
355 | * sigaction |
||
356 | */ |
||
357 | int |
||
358 | sigaction(int sig, const struct sigaction *act, struct sigaction *oact) |
||
359 | { |
||
360 | int sos; /* used to empty the sigqueue... */ |
||
361 | SYS_FLAGS f; |
||
362 | |||
363 | |||
364 | if (sig < 0 || sig >= SIG_MAX) |
||
365 | return errno = EINVAL, -1; |
||
366 | |||
367 | f = kern_fsave(); |
||
368 | |||
369 | if (oact) |
||
370 | *oact = sigactions[sig]; |
||
371 | if (act) |
||
372 | sigactions[sig] = *act; |
||
373 | |||
374 | /* |
||
375 | * If the action for this signal is being set to SIG_IGN or SIG_DFL, |
||
376 | * and that signal is process pending, then clear it. |
||
377 | */ |
||
378 | if (act && !(act->sa_flags & SA_SIGINFO) && |
||
379 | (act->sa_handler == SIG_IGN || act->sa_handler == SIG_DFL)) { |
||
380 | sos = sigqueued[sig]; |
||
381 | while (sos != -1) { |
||
382 | /* Remove the first entry and put it to the free |
||
383 | queue */ |
||
384 | sos = sig_queue[sigqueued[sig]].next; |
||
385 | |||
386 | if (sig_queue[sigqueued[sig]].flags & USED_FOR_TIMER) |
||
387 | sig_queue[sigqueued[sig]].flags &= ~SIGNAL_POSTED; |
||
388 | else { |
||
389 | sig_queue[sigqueued[sig]].next = sigqueue_free; |
||
390 | sigqueue_free = sigqueued[sig]; |
||
391 | } |
||
392 | } |
||
393 | sigqueued[sig] = -1; |
||
394 | sigdelset(&procsigpending, sig); |
||
395 | } |
||
396 | |||
397 | kern_frestore(f); |
||
398 | return 0; |
||
399 | } |
||
400 | |||
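For reference, below is a hedged sketch of installing a queued-signal handler with SA_SIGINFO through the sigaction() above; SIGUSR1 and the handler body are illustrative assumptions, not part of this file.

```c
#include <signal.h>

/* Handler used when SA_SIGINFO is set: it receives the siginfo_t,
   whose si_value field carries the value posted with sigqueue(). */
static void usr1_handler(int signo, siginfo_t *info, void *context)
{
  int v = info->si_value.sival_int;
  (void)v; (void)signo; (void)context;
}

/* Returns 0 on success, -1 with errno = EINVAL otherwise. */
int install_usr1(void)
{
  struct sigaction sa;

  sa.sa_sigaction = usr1_handler;   /* used because SA_SIGINFO is set      */
  sa.sa_handler   = SIG_DFL;        /* not used here, set for cleanliness  */
  sa.sa_flags     = SA_SIGINFO;
  sigemptyset(&sa.sa_mask);         /* no extra signals masked in handler  */

  return sigaction(SIGUSR1, &sa, NULL);
}
```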
401 | /* |
||
402 | * sigprocmask. this is just task_sigmask |
||
403 | */ |
||
404 | int |
||
405 | sigprocmask(int how, const sigset_t *set, sigset_t *oset) |
||
406 | { |
||
407 | return task_sigmask(how, set, oset); |
||
408 | } |
||
409 | |||
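The usual block/unblock pattern on top of the routine above; note that, as coded in task_sigmask(), any signals left pending while blocked are delivered before the unblocking call returns. SIGUSR1 is an assumed example signal.

```c
#include <signal.h>

/* Keep SIGUSR1 from being delivered inside a critical region. */
void critical_region(void)
{
  sigset_t set, oldset;

  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);

  sigprocmask(SIG_BLOCK, &set, &oldset);    /* SIGUSR1 now held pending    */

  /* ... code that must not be interrupted by the SIGUSR1 handler ... */

  sigprocmask(SIG_SETMASK, &oldset, NULL);  /* restores the old mask and   */
                                            /* delivers any pending hits   */
}
```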
410 | /* |
||
411 | * raise. this is just task_signal on itself. |
||
412 | */ |
||
413 | int |
||
414 | raise(int sig) |
||
415 | { |
||
416 | return task_signal(exec_shadow, sig); |
||
417 | } |
||
418 | |||
419 | /* |
||
420 | * kill. What does it mean to kill() in a multithreaded program? The POSIX |
||
421 | * spec says that a signal sent to a "process" shall be delivered to only |
||
422 | * one task. If no task has that signal unblocked, then the first |
||
423 | * task to unblock the signal is the lucky winner. Well, that means we |
||
424 | * need to have a global procsigpending to record process pending signals. |
||
425 | */ |
||
426 | int |
||
427 | kill(pid_t pid, int signo) |
||
428 | { |
||
429 | PID task; |
||
430 | PID i; |
||
431 | SYS_FLAGS f; |
||
432 | struct sigaction act; |
||
433 | |||
434 | /* Error check? Sure! */ |
||
435 | if (!signo) |
||
436 | return 0; |
||
437 | |||
438 | if (signo < 0 || signo >= SIG_MAX) |
||
439 | return EINVAL; |
||
440 | |||
441 | |||
442 | f = kern_fsave(); |
||
443 | |||
444 | act = sigactions[signo]; |
||
445 | |||
446 | if (!(act.sa_flags & SA_SIGINFO) && act.sa_handler == SIG_IGN) { |
||
447 | kern_frestore(f); |
||
448 | return 0; |
||
449 | } |
||
450 | |||
451 | /* |
||
452 | * Kill does not queue. If the signal is already pending, this |
||
453 | * one is tossed. |
||
454 | */ |
||
455 | if (sigismember(&procsigpending, signo)) { |
||
456 | kern_frestore(f); |
||
457 | return 0; |
||
458 | } |
||
459 | |||
460 | /* |
||
461 | * Make the signal process pending. |
||
462 | */ |
||
463 | sigaddset(&procsigpending, signo); |
||
464 | |||
465 | /* |
||
466 | * Look through the threads in sigwait to see if any of them |
||
467 | * is waiting for the signal. This is done as a separate pass |
||
468 | * since the value of the pthread sigmask is ignored (threads |
||
469 | * in sigwait will have blocked the signals being waited for). |
||
470 | */ |
||
471 | |||
29 | pj | 472 | for (task = iq_query_first(&sigwaiters); |
2 | pj | 473 | task != NIL; |
29 | pj | 474 | task = iq_query_next(task, &sigwaiters)) { |
2 | pj | 475 | if (sigismember(&proc_table[task].sigwaiting, signo)) { |
476 | LEVEL l; |
||
477 | |||
478 | if (proc_table[task].status == WAIT_SIGSUSPEND) |
||
479 | sigaddset(&proc_table[task].sigpending, signo); |
||
480 | |||
481 | /* Reactivate the task... */ |
||
29 | pj | 482 | iq_extract(task, &sigwaiters); |
2 | pj | 483 | l = proc_table[task].task_level; |
484 | level_table[l]->task_insert(l,task); |
||
485 | |||
486 | if (proc_table[task].delay_timer != -1) { |
||
487 | event_delete(proc_table[task].delay_timer); |
||
488 | proc_table[task].delay_timer = -1; |
||
489 | } |
||
490 | |||
491 | kern_frestore(f); |
||
492 | return 0; |
||
493 | } |
||
494 | } |
||
495 | |||
496 | /* |
||
497 | * No threads in sigwait. Too bad. Must find another thread to |
||
498 | * deliver it to. |
||
499 | */ |
||
500 | for (i = 1; i < MAX_PROC; i++) { |
||
501 | if (proc_table[i].status != FREE) { |
||
502 | if (! sigismember(&proc_table[i].sigmask, signo)) { |
||
503 | /* Add the signal to list of pending |
||
504 | signals for the target task. */ |
||
505 | sigaddset(&proc_table[i].sigpending, signo); |
||
506 | |||
507 | /* check for an interruptable function!!! */ |
||
508 | test_interruptable_points(i); |
||
509 | break; |
||
510 | } |
||
511 | } |
||
512 | } |
||
513 | |||
514 | /* |
||
515 | * If not in an interrupt, use this opportunity to deliver |
||
516 | * pending unblocked signals to the current thread. |
||
517 | */ |
||
518 | if (! ll_ActiveInt()) { |
||
519 | kern_deliver_pending_signals(); |
||
520 | } |
||
521 | |||
522 | kern_frestore(f); |
||
523 | return 0; |
||
524 | } |
||
525 | |||
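A small sketch of the behaviour described above: kill() only marks the signal pending at the "process" level and never queues, so a second send while the first is still pending is dropped. Note that this implementation never reads the pid argument; SIGUSR2 is an assumed example signal.

```c
#include <signal.h>

void poke_process(void)
{
  kill(0, SIGUSR2);   /* marks SIGUSR2 process-pending; wakes a sigwait()er
                         or picks a task with the signal unblocked          */
  kill(0, SIGUSR2);   /* if the first one has not been consumed yet,
                         this one is simply tossed (kill() does not queue)  */
}
```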
526 | /* |
||
527 | * sigqueue internal: accept also the SI_XXX value |
||
528 | */ |
||
529 | int |
||
530 | sigqueue_internal(pid_t pid, int signo, const union sigval value, int si_code) |
||
531 | { |
||
532 | PID task; |
||
533 | SYS_FLAGS f; |
||
534 | int i; |
||
535 | |||
536 | int thingie; /* an element of the signal queue */ |
||
537 | int sos; /* used when inserting thingie in |
||
538 | the signal queue */ |
||
539 | struct sigaction act; |
||
540 | |||
541 | /* Error check? Sure! */ |
||
542 | if (!signo) |
||
543 | return 0; |
||
544 | |||
545 | if (signo < 0 || signo >= SIG_MAX) |
||
546 | return EINVAL; |
||
547 | |||
548 | |||
549 | f = kern_fsave(); |
||
550 | /* |
||
551 | * Look at the process sigactions. If the "process" is ignoring |
||
552 | * the signal, then the signal is not placed in the pending list. |
||
553 | */ |
||
554 | act = sigactions[signo]; |
||
555 | |||
556 | if (!(act.sa_flags & SA_SIGINFO) && act.sa_handler == SIG_IGN) { |
||
557 | kern_frestore(f); |
||
558 | return 0; |
||
559 | } |
||
560 | |||
561 | |||
562 | /* |
||
563 | * If sa_flags does not include SA_SIGINFO, and there is already |
||
564 | * a signal pending, this new one is dropped. |
||
565 | */ |
||
566 | if ((! (act.sa_flags & SA_SIGINFO)) && |
||
567 | sigismember(&procsigpending, signo)) { |
||
568 | kern_frestore(f); |
||
569 | return 0; |
||
570 | } |
||
571 | |||
572 | /* |
||
573 | * Gotta have space for the new signal. |
||
574 | */ |
||
575 | if (sigqueue_free == -1) { |
||
576 | kern_frestore(f); |
||
577 | return EAGAIN; |
||
578 | } |
||
579 | |||
580 | /* |
||
581 | * Create a queue entry. |
||
582 | */ |
||
583 | thingie = sigqueue_free; |
||
584 | sigqueue_free = sig_queue[sigqueue_free].next; |
||
585 | |||
586 | sig_queue[thingie].info.si_signo = signo; |
||
587 | sig_queue[thingie].info.si_code = si_code; |
||
588 | sig_queue[thingie].info.si_value = value; |
||
589 | sig_queue[thingie].info.si_task = exec_shadow; |
||
590 | sig_queue[thingie].next = -1; |
||
591 | |||
592 | /* |
||
593 | * Queue the signal on the process. |
||
594 | */ |
||
595 | |||
596 | /* we insert the signal at the queue's tail */ |
||
597 | if (sigqueued[signo] == -1) |
||
598 | sigqueued[signo] = thingie; |
||
599 | else { |
||
600 | sos = sigqueued[signo]; |
||
601 | while (sig_queue[sos].next != -1) sos = sig_queue[sos].next; |
||
602 | sig_queue[sos].next = thingie; |
||
603 | } |
||
604 | sigaddset(&procsigpending, signo); |
||
605 | |||
606 | /* |
||
607 | * Look through the threads in sigwait to see if any of them |
||
608 | * is waiting for the signal. This is done as a separate pass |
||
609 | * since the value of the pthread sigmask is ignored (threads |
||
610 | * in sigwait will have blocked the signals being waited for). |
||
611 | * If we find one, wakeup that thread. Note that POSIX says that |
||
612 | * if multiple threads are sigwaiting for the same signal number, |
||
613 | * exactly one thread is woken up. The problem is how to maintain |
||
614 | * the FIFO order, and how to prevent lost signals in the case that |
||
615 | * a thread calls sigwait before the woken thread runs and gets it. |
||
616 | */ |
||
29 | pj | 617 | for (task = iq_query_first(&sigwaiters); |
2 | pj | 618 | task != NIL; |
29 | pj | 619 | task = iq_query_next(task, &sigwaiters)) { |
2 | pj | 620 | if (sigismember(&proc_table[task].sigwaiting, signo)) { |
621 | LEVEL l; |
||
622 | |||
623 | if (proc_table[task].status == WAIT_SIGSUSPEND) |
||
624 | sigaddset(&proc_table[task].sigpending, signo); |
||
625 | |||
626 | /* Reactivate the task... */ |
||
29 | pj | 627 | iq_extract(task, &sigwaiters); |
2 | pj | 628 | |
629 | l = proc_table[task].task_level; |
||
630 | level_table[l]->task_insert(l,task); |
||
631 | |||
632 | if (proc_table[task].delay_timer != -1) { |
||
633 | event_delete(proc_table[task].delay_timer); |
||
634 | proc_table[task].delay_timer = -1; |
||
635 | } |
||
636 | |||
637 | kern_frestore(f); |
||
638 | return 0; |
||
639 | |||
640 | } |
||
641 | } |
||
642 | |||
643 | /* |
||
644 | * Need to find a thread to deliver the signal to. Look for the |
||
645 | * first thread that is not blocking the signal, and send it the |
||
646 | * signal. It is my opinion that any program that is using sigwait, |
||
647 | * and has not blocked signals in all of its threads, is bogus. The |
||
648 | * same is true if the program is not using sigwait, and has the |
||
649 | * signal unblocked in more than one thread. |
||
650 | * Why? You might wake up a thread, but not have an actual queue |
||
651 | * entry left by the time it runs again and looks, since another |
||
652 | * thread could call sigwait and get that queue entry, or if there |
||
653 | * are multiple threads that can take the signal, one thread could |
||
654 | * get all the entries. This could result in an interrupted thread, |
||
655 | * but with no signal to deliver. Well, not much to do about it. |
||
656 | * Let's just queue the signal for the process, and let the chips |
||
657 | * fall where they may. |
||
658 | */ |
||
659 | for (i = 1; i < MAX_PROC; i++) { |
||
660 | if (proc_table[i].status != FREE) { |
||
661 | if (! sigismember(&proc_table[i].sigmask, signo)) { |
||
662 | /* Add the signal to list of pending |
||
663 | signals for the target task. */ |
||
664 | sigaddset(&proc_table[i].sigpending, signo); |
||
665 | |||
666 | /* check for an interruptable function!!! */ |
||
667 | test_interruptable_points(i); |
||
668 | |||
669 | break; |
||
670 | } |
||
671 | } |
||
672 | } |
||
673 | |||
674 | /* |
||
675 | * If not in an interrupt, use this opportunity to deliver |
||
676 | * pending unblocked signals to the current thread. |
||
677 | * (NB: a discussion on the flag active_exc is near the function |
||
678 | * kern_raise() ) |
||
679 | */ |
||
680 | if (! ll_ActiveInt() && active_exc == 0) { |
||
681 | kern_deliver_pending_signals(); |
||
682 | } |
||
683 | |||
684 | kern_frestore(f); |
||
685 | return 0; |
||
686 | } |
||
687 | |||
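sigqueue() itself is not defined in this file (kern_raise() below calls it, presumably a thin wrapper around sigqueue_internal() with an SI_* code), so the sketch below is an assumption about that wrapper; SIGUSR1 and the posted value are illustrative.

```c
#include <signal.h>

/* Post SIGUSR1 together with an integer payload.  A handler installed
   with SA_SIGINFO reads it back from info->si_value.sival_int. */
void post_value(int value)
{
  union sigval sv;

  sv.sival_int = value;

  /* With SA_SIGINFO set in the receiver's sigaction, repeated posts are
     queued (up to SIGQUEUE_MAX preallocated entries, then EAGAIN). */
  sigqueue(0, SIGUSR1, sv);
}
```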
688 | static void sigwait_timer(void *arg) |
||
689 | { |
||
690 | PID p = (PID)arg; |
||
691 | LEVEL l; |
||
692 | |||
693 | /* reset the event timer */ |
||
694 | proc_table[p].delay_timer = -1; |
||
695 | |||
696 | /* set the timeout flag */ |
||
697 | proc_table[p].control |= SIGTIMEOUT_EXPIRED; |
||
698 | |||
699 | /* insert the task into the ready queue and extract it from the waiters */ |
||
29 | pj | 700 | iq_extract(p, &sigwaiters); |
2 | pj | 701 | |
702 | l = proc_table[p].task_level; |
||
703 | level_table[l]->task_insert(l,p); |
||
704 | |||
705 | event_need_reschedule(); |
||
706 | } |
||
707 | |||
708 | /* |
||
709 | * Sigwait. Sigwait overrides the state of the pthread sigmask and the global |
||
710 | * sigactions. The caller *must* block the set of signals in "set", before |
||
711 | * calling sigwait, otherwise the behaviour is undefined (which means that |
||
712 | * the caller will take an async signal anyway, and sigwait will return EINTR). |
||
713 | */ |
||
714 | int |
||
715 | kern_sigwait_internal(const sigset_t *set, |
||
716 | siginfo_t *info, const struct timespec *timeout) |
||
717 | { |
||
718 | proc_des *pthread = &proc_table[exec_shadow]; |
||
719 | int thissig; |
||
720 | |||
721 | struct timespec ty; |
||
722 | TIME tx; |
||
723 | LEVEL l; |
||
724 | |||
725 | task_testcancel(); |
||
726 | |||
727 | /* siglock and pthread siglock are taken from an interrupt handler */ |
||
728 | kern_cli(); |
||
729 | |||
730 | /* |
||
731 | * First check for process pending signals. Must take and hold |
||
732 | * the global siglock to prevent races with kill() and sigqueue(). |
||
733 | */ |
||
734 | if (procsigpending & *set) { |
||
735 | int sos; |
||
736 | |||
737 | thissig = ffs(procsigpending & *set); |
||
738 | |||
739 | /* |
||
740 | * Sent with kill(). Using sigwait and kill is Bogus! |
||
741 | */ |
||
742 | if (sigqueued[thissig] == -1) { |
||
743 | info->si_signo = thissig; |
||
744 | info->si_code = SI_USER; |
||
745 | info->si_value.sival_int = 0; |
||
746 | |||
747 | sigdelset(&pthread->sigpending, thissig); |
||
748 | sigdelset(&procsigpending, thissig); |
||
749 | kern_sti(); |
||
750 | return 0; |
||
751 | } |
||
752 | |||
753 | /* |
||
754 | * Grab the first queue entry. |
||
755 | */ |
||
756 | sos = sigqueued[thissig]; |
||
29 | pj | 757 | sigqueued[thissig] = sig_queue[sos].next; |
2 | pj | 758 | |
759 | /* |
||
760 | * If that was the last one, reset the process procsigpending. |
||
761 | */ |
||
762 | if (sigqueued[thissig] == -1) |
||
763 | sigdelset(&procsigpending, thissig); |
||
764 | sigdelset(&pthread->sigpending, thissig); |
||
765 | |||
766 | /* |
||
767 | * Copy the information and free the queue entry. |
||
768 | */ |
||
769 | info->si_signo = sig_queue[sos].info.si_signo; |
||
770 | info->si_code = sig_queue[sos].info.si_code; |
||
771 | info->si_value.sival_int = sig_queue[sos].info.si_value.sival_int; |
||
772 | |||
773 | if (sig_queue[sos].flags & USED_FOR_TIMER) |
||
774 | sig_queue[sos].flags &= ~SIGNAL_POSTED; |
||
775 | else { |
||
776 | sig_queue[sos].next = sigqueue_free; |
||
777 | sigqueue_free = sos; |
||
778 | } |
||
779 | kern_sti(); |
||
780 | return 0; |
||
781 | } |
||
782 | |||
783 | /* |
||
784 | * Now check for pthread pending signals. |
||
785 | */ |
||
786 | if (pthread->sigpending & *set) { |
||
787 | thissig = ffs(pthread->sigpending & *set); |
||
788 | info->si_signo = thissig; |
||
789 | info->si_code = SI_USER; |
||
790 | info->si_value.sival_int = 0; |
||
791 | sigdelset(&pthread->sigpending, thissig); |
||
792 | kern_sti(); |
||
793 | return 0; |
||
794 | } |
||
795 | |||
796 | /* |
||
797 | * For timed wait, if nothing is available and the timeout value |
||
798 | * is zero, its an error. |
||
799 | */ |
||
800 | if (timeout && timeout->tv_sec == 0 && timeout->tv_nsec == 0) { |
||
801 | kern_sti(); |
||
802 | return EAGAIN; |
||
803 | } |
||
804 | |||
805 | /* |
||
806 | * Grab the wait lock and set the sigwaiting mask. Once that is done, |
||
807 | * release the thread siglock; Another thread can try and wake this |
||
808 | * thread up as a result of seeing it in sigwait, but the actual |
||
809 | * wakeup will be delayed until the waitlock is released in the switch |
||
810 | * code. |
||
811 | */ |
||
812 | pthread->sigwaiting = *set; |
||
813 | |||
814 | /* now, we really block the task... */ |
||
815 | proc_table[exec_shadow].context = kern_context_save(); |
||
816 | |||
817 | /* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
||
818 | ll_gettime(TIME_EXACT, &schedule_time); |
||
819 | SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
||
820 | tx = TIMESPEC2USEC(&ty); |
||
821 | proc_table[exec_shadow].avail_time -= tx; |
||
822 | jet_update_slice(tx); |
||
823 | if (cap_timer != NIL) { |
||
824 | event_delete(cap_timer); |
||
825 | cap_timer = NIL; |
||
826 | } |
||
827 | l = proc_table[exec_shadow].task_level; |
||
828 | level_table[l]->task_extract(l,exec_shadow); |
||
829 | |||
830 | /* |
||
831 | * Add this thread to the list of threads in sigwait. Once that is |
||
832 | * done, it is safe to release the global siglock, which will allow |
||
833 | * another thread to scan the sigwaiters list. As above, it might |
||
834 | * find a thread in sigwait, but it will not be able to wake it up |
||
835 | * until the waitlock is released in the switch code. |
||
836 | */ |
||
29 | pj | 837 | iq_insertfirst(exec_shadow, &sigwaiters); |
2 | pj | 838 | proc_table[exec_shadow].status = WAIT_SIG; |
839 | |||
840 | if (timeout) { |
||
841 | /* we can use the delaytimer because if we are here we are not in a |
||
842 | task_delay */ |
||
843 | struct timespec t, abstime; |
||
844 | ll_gettime(TIME_EXACT, &t); |
||
845 | ADDTIMESPEC(&t, timeout, &abstime); |
||
846 | |||
847 | proc_table[exec_shadow].delay_timer = |
||
848 | kern_event_post(&abstime,sigwait_timer,(void *)exec_shadow); |
||
849 | } |
||
850 | |||
851 | /* and finally we reschedule */ |
||
852 | exec = exec_shadow = -1; |
||
853 | scheduler(); |
||
854 | ll_context_to(proc_table[exec_shadow].context); |
||
855 | |||
856 | task_testcancel(); |
||
857 | |||
858 | pthread->sigwaiting = 0; |
||
859 | |||
860 | /* |
||
861 | * Look for timeout. |
||
862 | */ |
||
863 | if (proc_table[exec_shadow].control & SIGTIMEOUT_EXPIRED) { |
||
864 | kern_sti(); |
||
865 | return EAGAIN; |
||
866 | } |
||
867 | |||
868 | /* |
||
869 | * Look for a wakeup to deliver a queued signal. This would come |
||
870 | * either from kill() or from sigqueue(). |
||
871 | */ |
||
872 | if (procsigpending & *set) { |
||
873 | int sos; |
||
874 | |||
875 | thissig = ffs(procsigpending & *set); |
||
876 | |||
877 | /* |
||
878 | * Sent with kill(). Using sigwait and kill is Bogus! |
||
879 | */ |
||
880 | if (sigqueued[thissig] == -1) { |
||
881 | info->si_signo = thissig; |
||
882 | info->si_code = SI_USER; |
||
883 | info->si_value.sival_int = 0; |
||
884 | |||
885 | sigdelset(&procsigpending, thissig); |
||
886 | kern_sti(); |
||
887 | return 0; |
||
888 | } |
||
889 | |||
890 | /* |
||
891 | * Grab the first queue entry. |
||
892 | */ |
||
29 | pj | 893 | sos = sigqueued[thissig]; |
894 | sigqueued[thissig] = sig_queue[sos].next; |
||
2 | pj | 895 | |
896 | /* |
||
897 | * If that was the last one, reset the process procsigpending. |
||
898 | */ |
||
899 | if (sigqueued[thissig] == -1) |
||
900 | sigdelset(&procsigpending, thissig); |
||
901 | |||
902 | /* |
||
903 | * Copy the information and free the queue entry. |
||
904 | */ |
||
905 | info->si_signo = sig_queue[sos].info.si_signo; |
||
906 | info->si_code = sig_queue[sos].info.si_code; |
||
907 | info->si_value.sival_int = sig_queue[sos].info.si_value.sival_int; |
||
908 | |||
909 | if (sig_queue[sos].flags & USED_FOR_TIMER) |
||
910 | sig_queue[sos].flags &= ~SIGNAL_POSTED; |
||
911 | else { |
||
912 | sig_queue[sos].next = sigqueue_free; |
||
913 | sigqueue_free = sos; |
||
914 | } |
||
915 | |||
916 | kern_sti(); |
||
917 | return 0; |
||
918 | } |
||
919 | |||
920 | /* |
||
921 | * Well, at the moment I am going to assume that if this thread |
||
922 | * wakes up, and there is no signal pending in the waitset, the |
||
923 | * thread wait was interrupted for some other reason. Return EINTR. |
||
924 | */ |
||
925 | if (! (pthread->sigpending & *set)) { |
||
926 | kern_sti(); |
||
927 | return EINTR; |
||
928 | } |
||
929 | |||
930 | /* |
||
931 | * Otherwise, get the first signal and return it. |
||
932 | */ |
||
933 | thissig = ffs(pthread->sigpending & *set); |
||
934 | info->si_signo = thissig; |
||
935 | info->si_code = SI_USER; |
||
936 | info->si_value.sival_int = 0; |
||
937 | sigdelset(&pthread->sigpending, thissig); |
||
938 | kern_sti(); |
||
939 | return 0; |
||
940 | } |
||
941 | |||
942 | /* |
||
943 | * Sigwait. |
||
944 | */ |
||
945 | int |
||
946 | sigwait(const sigset_t *set, int *sig) |
||
947 | { |
||
948 | siginfo_t info; |
||
949 | int rc; |
||
950 | |||
951 | memset(&info, 0, sizeof(info)); |
||
952 | |||
953 | rc = kern_sigwait_internal(set, &info, 0); |
||
954 | |||
955 | if (rc) |
||
956 | return rc; |
||
957 | |||
958 | *sig = info.si_signo; |
||
959 | return 0; |
||
960 | } |
||
961 | |||
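As the comment before kern_sigwait_internal() stresses, the waited-for signals must already be blocked by the caller; below is a minimal synchronous wait loop under that rule (SIGUSR1 and the loop body are illustrative assumptions).

```c
#include <signal.h>

/* Synchronously consume SIGUSR1 events, one per loop iteration. */
void usr1_wait_loop(void)
{
  sigset_t set;
  int sig;

  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  sigprocmask(SIG_BLOCK, &set, NULL);   /* block it first, as required */

  for (;;) {
    if (sigwait(&set, &sig) == 0) {
      /* sig == SIGUSR1 here: handle the event without a handler */
    } else {
      /* EINTR: the wait was interrupted for some other reason */
    }
  }
}
```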
962 | /* |
||
963 | * Sigwaitinfo. |
||
964 | */ |
||
965 | int |
||
966 | sigwaitinfo(const sigset_t *set, siginfo_t *info) |
||
967 | { |
||
968 | return kern_sigwait_internal(set, info, 0); |
||
969 | } |
||
970 | |||
971 | /* |
||
972 | * Sigtimedwait. |
||
973 | */ |
||
974 | int |
||
975 | sigtimedwait(const sigset_t *set, |
||
976 | siginfo_t *info, const struct timespec *timeout) |
||
977 | { |
||
978 | if (! timeout) |
||
979 | return EINVAL; |
||
980 | |||
981 | return kern_sigwait_internal(set, info, timeout); |
||
982 | } |
||
983 | |||
984 | /* |
||
985 | * Signal |
||
986 | */ |
||
987 | void (*signal(int signum, void (*handler)(int)))(int) |
||
988 | { |
||
989 | struct sigaction act, oact; |
||
990 | int olderrno; |
||
991 | void (*retvalue)(int); |
||
992 | |||
993 | act.sa_handler = handler; |
||
994 | sigemptyset(&act.sa_mask); |
||
995 | act.sa_flags = 0; |
||
996 | |||
997 | olderrno = errno; |
||
998 | if (sigaction(signum, &act, &oact)) |
||
999 | retvalue = SIG_ERR; |
||
1000 | else |
||
1001 | if (oact.sa_flags & SA_SIGINFO) |
||
1002 | retvalue = SIG_ERR; |
||
1003 | else |
||
1004 | retvalue = oact.sa_handler; |
||
1005 | |||
1006 | errno = olderrno; |
||
1007 | |||
1008 | return retvalue; |
||
1009 | |||
1010 | } |
||
1011 | |||
1012 | |||
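A short usage note for the wrapper above: signal() returns the previous one-argument handler, or SIG_ERR if the signal number is invalid or the previous action was installed with SA_SIGINFO. The handler below is an illustrative assumption.

```c
#include <signal.h>

static void on_alarm(int signo)
{
  (void)signo;   /* plain one-argument handler */
}

void install_alarm_handler(void)
{
  if (signal(SIGALRM, on_alarm) == SIG_ERR) {
    /* invalid signal number, or the old action used SA_SIGINFO */
  }
}
```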
1013 | /* |
||
1014 | * sigpending |
||
1015 | */ |
||
1016 | int sigpending(sigset_t *set) |
||
1017 | { |
||
1018 | *set = procsigpending | proc_table[exec_shadow].sigpending; |
||
1019 | return 0; |
||
1020 | } |
||
1021 | |||
1022 | |||
1023 | /* |
||
1024 | * sigsuspend |
||
1025 | */ |
||
1026 | int sigsuspend(const sigset_t *set) |
||
1027 | { |
||
1028 | proc_des *pthread = &proc_table[exec_shadow]; |
||
1029 | |||
1030 | struct timespec ty; |
||
1031 | TIME tx; |
||
1032 | LEVEL l; |
||
1033 | |||
1034 | task_testcancel(); |
||
1035 | |||
1036 | kern_cli(); |
||
1037 | |||
1038 | /* |
||
1039 | * Now check for pthread pending signals. |
||
1040 | */ |
||
1041 | if (pthread->sigpending & *set) { |
||
1042 | kern_deliver_pending_signals(); |
||
1043 | kern_sti(); |
||
1044 | return 0; |
||
1045 | } |
||
1046 | |||
1047 | /* |
||
1048 | * Grab the wait lock and set the sigwaiting mask. Once that is done, |
||
1049 | * release the thread siglock; Another thread can try and wake this |
||
1050 | * thread up as a result of seeing it in sigwait, but the actual |
||
1051 | * wakeup will be delayed until the waitlock is released in the switch |
||
1052 | * code. |
||
1053 | */ |
||
1054 | pthread->sigwaiting = *set; |
||
1055 | |||
1056 | /* now, we really block the task... */ |
||
1057 | proc_table[exec_shadow].context = kern_context_save(); |
||
1058 | |||
1059 | /* SAME AS SCHEDULER... manage the capacity event and the load_info */ |
||
1060 | ll_gettime(TIME_EXACT, &schedule_time); |
||
1061 | SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty); |
||
1062 | tx = TIMESPEC2USEC(&ty); |
||
1063 | proc_table[exec_shadow].avail_time -= tx; |
||
1064 | jet_update_slice(tx); |
||
1065 | if (cap_timer != NIL) { |
||
1066 | event_delete(cap_timer); |
||
1067 | cap_timer = NIL; |
||
1068 | } |
||
1069 | l = proc_table[exec_shadow].task_level; |
||
1070 | level_table[l]->task_extract(l,exec_shadow); |
||
1071 | |||
29 | pj | 1072 | iq_insertfirst(exec_shadow, &sigwaiters); |
2 | pj | 1073 | proc_table[exec_shadow].status = WAIT_SIGSUSPEND; |
1074 | |||
1075 | /* and finally we reschedule */ |
||
1076 | exec = exec_shadow = -1; |
||
1077 | scheduler(); |
||
1078 | ll_context_to(proc_table[exec_shadow].context); |
||
1079 | |||
1080 | task_testcancel(); |
||
1081 | |||
1082 | /* |
||
1083 | * Well, at the moment I am going to assume that if this thread |
||
1084 | * wakes up, and there is no signal pending in the waitset, the |
||
1085 | * thread wait was interrupted for some other reason. Return EINTR. |
||
1086 | */ |
||
1087 | if (! (pthread->sigpending & *set)) { |
||
1088 | kern_sti(); |
||
1089 | return EINTR; |
||
1090 | } |
||
1091 | |||
1092 | /* |
||
1093 | * Otherwise, deliver the signals. |
||
1094 | */ |
||
1095 | kern_deliver_pending_signals(); |
||
1096 | kern_sti(); |
||
1097 | return 0; |
||
1098 | } |
||
1099 | |||
1100 | |||
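Note that, as implemented above (and as pause() below relies on by passing a full set), the argument of this sigsuspend() is treated as the set of signals to wait for, not as the temporary mask of POSIX sigsuspend(). A hedged sketch under that reading, with SIGUSR1 assumed:

```c
#include <errno.h>
#include <signal.h>

/* Block until SIGUSR1 becomes pending for this task; pending unblocked
   signals are delivered (handlers run) before sigsuspend() returns 0. */
void wait_for_usr1(void)
{
  sigset_t set;

  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);

  while (sigsuspend(&set) == EINTR)
    ;   /* woken up for another reason: keep waiting */
}
```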
1101 | void timer_alarmfire(void *arg) |
||
1102 | { |
||
1103 | alarm_timer = -1; |
||
1104 | |||
1105 | kill(0, SIGALRM); |
||
1106 | |||
1107 | event_need_reschedule(); |
||
1108 | } |
||
1109 | |||
1110 | /* |
||
1111 | * alarm |
||
1112 | */ |
||
1113 | unsigned int alarm(unsigned int seconds) |
||
1114 | { |
||
1115 | struct timespec returnvalue, temp; |
||
1116 | |||
1117 | kern_cli(); |
||
1118 | |||
1119 | ll_gettime(TIME_EXACT, &temp); |
||
1120 | |||
1121 | if (alarm_timer == -1) |
||
1122 | returnvalue.tv_sec = 0; |
||
1123 | else { |
||
1124 | SUBTIMESPEC(&alarm_time, &temp, &returnvalue); |
||
1125 | |||
1126 | event_delete(alarm_timer); |
||
1127 | } |
||
1128 | |||
1129 | if (seconds) { |
||
1130 | temp.tv_sec += seconds; |
||
1131 | TIMESPEC_ASSIGN(&alarm_time, &temp); |
||
1132 | alarm_timer = kern_event_post(&temp, timer_alarmfire, NULL); |
||
1133 | } |
||
1134 | else |
||
1135 | alarm_timer = -1; |
||
1136 | |||
1137 | kern_sti(); |
||
1138 | |||
1139 | return returnvalue.tv_sec; |
||
1140 | } |
||
1141 | |||
1142 | int pause(void) |
||
1143 | { |
||
1144 | sigset_t set; |
||
1145 | |||
1146 | sigfillset(&set); |
||
1147 | return sigsuspend(&set); |
||
1148 | } |
||
1149 | |||
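Putting alarm(), SIGALRM and pause() together; a sketch only, since the handler is an assumption and the default action for SIGALRM (see really_deliver_signal() below) would otherwise abort the system.

```c
#include <signal.h>

static void on_alarm_tick(int signo)
{
  (void)signo;   /* runs when the one-shot alarm fires */
}

/* Sleep roughly `secs` seconds using the alarm machinery above. */
void alarm_nap(unsigned int secs)
{
  signal(SIGALRM, on_alarm_tick);  /* default action would abort the system */
  alarm(secs);                     /* post the one-shot SIGALRM event       */
  pause();                         /* wait for any signal to arrive         */
}
```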
1150 | /* |
||
1151 | * Internal stuff. |
||
1152 | */ |
||
1153 | |||
1154 | /* |
||
1155 | * Deliver an asynchronous signal. This must be called with interrupts |
||
1156 | * blocked and the pthread siglock held. |
||
1157 | */ |
||
1158 | void |
||
1159 | kern_deliver_async_signal(int sig) |
||
1160 | { |
||
1161 | siginfo_t siginfo; |
||
1162 | |||
1163 | siginfo.si_signo = sig; |
||
1164 | siginfo.si_code = SI_USER; |
||
1165 | siginfo.si_value.sival_int = 0; |
||
1166 | siginfo.si_task = exec_shadow; |
||
1167 | |||
1168 | really_deliver_signal(sig, &siginfo); |
||
1169 | } |
||
1170 | |||
1171 | /* |
||
1172 | * Deliver a process signal. This must be called with interrupts |
||
1173 | * blocked and the siglock and pthread siglock held. |
||
1174 | */ |
||
1175 | void |
||
1176 | kern_deliver_process_signal(int sig) |
||
1177 | { |
||
1178 | siginfo_t siginfo; |
||
1179 | int thingie; |
||
1180 | |||
1181 | /* |
||
1182 | * Sent with kill(). Using sigwait and kill is Bogus! |
||
1183 | */ |
||
1184 | if (sigqueued[sig] == -1) { |
||
1185 | siginfo.si_signo = sig; |
||
1186 | siginfo.si_code = SI_USER; |
||
1187 | siginfo.si_value.sival_int = 0; |
||
1188 | siginfo.si_task = exec_shadow; |
||
1189 | |||
1190 | sigdelset(&procsigpending, sig); |
||
1191 | goto deliver; |
||
1192 | } |
||
1193 | |||
1194 | /* |
||
1195 | * Grab the first queue entry. |
||
1196 | */ |
||
1197 | thingie = sigqueued[sig]; |
||
1198 | sigqueued[sig] = sig_queue[sigqueued[sig]].next; |
||
1199 | |||
1200 | /* |
||
1201 | * If that was the last one, reset the process sigpending. |
||
1202 | */ |
||
1203 | if (sigqueued[sig] == -1) |
||
1204 | sigdelset(&procsigpending, sig); |
||
1205 | |||
1206 | /* |
||
1207 | * Copy the information and free the queue entry. |
||
1208 | */ |
||
1209 | siginfo.si_signo = sig_queue[thingie].info.si_signo; |
||
1210 | siginfo.si_code = sig_queue[thingie].info.si_code; |
||
1211 | siginfo.si_value.sival_int = sig_queue[thingie].info.si_value.sival_int; |
||
1212 | siginfo.si_task = sig_queue[thingie].info.si_task; |
||
1213 | |||
1214 | if (sig_queue[thingie].flags & USED_FOR_TIMER) |
||
1215 | sig_queue[thingie].flags &= ~SIGNAL_POSTED; |
||
1216 | else { |
||
1217 | sig_queue[thingie].next = sigqueue_free; |
||
1218 | sigqueue_free = thingie; |
||
1219 | } |
||
1220 | |||
1221 | deliver: |
||
1222 | really_deliver_signal(sig, &siginfo); |
||
1223 | |||
1224 | } |
||
1225 | |||
1226 | /* |
||
1227 | * Deliver any pending signals. Called out of the context switch code |
||
1228 | * when a task switches in, and there are pending signals. |
||
1229 | * |
||
1230 | * Interrupts are blocked... |
||
1231 | */ |
||
1232 | void |
||
1233 | kern_deliver_pending_signals(void) |
||
1234 | { |
||
1235 | proc_des *task; /* current executing task... */ |
||
1236 | |||
1237 | task = &proc_table[exec_shadow]; |
||
1238 | |||
1239 | /* we have to check if the task was descheduled while serving |
||
1240 | signals... if so, the call to this function is useless... |
||
1241 | because the task is already inside it!!! (NB: the task can be |
||
1242 | descheduled because the signal handlers are executed with |
||
1243 | interrupts enabled...) */ |
||
1244 | if (task->control & TASK_DOING_SIGNALS) |
||
1245 | return; |
||
1246 | |||
1247 | task->control |= TASK_DOING_SIGNALS; |
||
1248 | |||
1249 | /* |
||
1250 | * Look for process pending signals that are unblocked, and deliver. |
||
1251 | */ |
||
1252 | while (procsigpending & ~task->sigmask) { |
||
1253 | /* NB: the while test should be independent of any local |
||
1254 | variable... because when we process signals there can be |
||
1255 | some context change before we return from |
||
1256 | kern_deliver_pending_signals... |
||
1257 | */ |
||
1258 | int sig = ffs(procsigpending & ~task->sigmask); |
||
1259 | |||
1260 | /* Call with siglock and thread siglock locked */ |
||
1261 | kern_deliver_process_signal(sig); |
||
1262 | } |
||
1263 | |||
1264 | /* |
||
1265 | * Now deliver any pthread pending signals that are left. |
||
1266 | * NB: the pthread pending signals are NOT sent via sigqueue!!! |
||
1267 | */ |
||
1268 | while (task->sigpending & ~task->sigmask) { |
||
1269 | int sig = ffs(task->sigpending & ~task->sigmask); |
||
1270 | |||
1271 | /* Call at splhigh and thread locked */ |
||
1272 | kern_deliver_async_signal(sig); |
||
1273 | } |
||
1274 | task->control &= ~TASK_DOING_SIGNALS; |
||
1275 | } |
||
1276 | |||
1277 | /* |
||
1278 | * Actually deliver the signal to the task. At this point the signal |
||
1279 | * is going to be delivered, so it no longer matters if it is blocked. |
||
1280 | */ |
||
1281 | void |
||
1282 | really_deliver_signal(int sig, siginfo_t *info) |
||
1283 | { |
||
1284 | proc_des *task; /* current executing task... */ |
||
1285 | |||
1286 | sigset_t sigmask, oldmask; |
||
1287 | struct sigaction act; |
||
1288 | SYS_FLAGS f; |
||
1289 | |||
1290 | f = kern_fsave(); |
||
1291 | |||
1292 | task = &proc_table[exec_shadow]; |
||
1293 | |||
1294 | act = sigactions[sig]; |
||
1295 | |||
1296 | //kern_printf("Ci sono!!!flags=%d hand=%d sigaction=%d mask=%d",act.sa_flags, |
||
1297 | // (int)act.sa_handler, (int)act.sa_sigaction, (int)act.sa_mask); |
||
1298 | |||
1299 | /* |
||
1300 | * Ignored? |
||
1301 | */ |
||
1302 | if (!(act.sa_flags & SA_SIGINFO) && (act.sa_handler == SIG_IGN || |
||
1303 | act.sa_handler == SIG_ERR) ) |
||
1304 | return; |
||
1305 | |||
1306 | if (!(act.sa_flags & SA_SIGINFO) && act.sa_handler == SIG_DFL) { |
||
1307 | /* Default action for all signals is termination */ |
||
1308 | kern_printf("\nSignal number %d...\n",sig); |
||
1309 | if (act.sa_flags & SA_SIGINFO) |
||
1310 | kern_printf("with value : %d\n",info->si_value.sival_int); |
||
1311 | sys_abort(ASIG_DEFAULT_ACTION); |
||
1312 | } |
||
1313 | |||
1314 | /* |
||
1315 | * Set the signal mask for calling the handler. |
||
1316 | */ |
||
1317 | oldmask = sigmask = task->sigmask; |
||
1318 | sigaddset(&sigmask, sig); |
||
1319 | sigmask |= act.sa_mask; |
||
1320 | sigdelset(&task->sigpending, sig); |
||
1321 | task->sigmask = sigmask; |
||
1322 | kern_sti(); |
||
1323 | |||
1324 | /* |
||
1325 | * and call the handler ... |
||
1326 | */ |
||
1327 | if (act.sa_flags & SA_SIGINFO) |
||
1328 | act.sa_sigaction(sig, info, NULL); |
||
1329 | else |
||
1330 | ((void (*)(int, int, void *))act.sa_handler) |
||
1331 | (sig, info->si_value.sival_int, NULL); |
||
1332 | |||
1333 | /* NB: when we pass the kern_cli(), it can happen that |
||
1334 | an irq (and/or a timer...) fired... and did a context change. |
||
1335 | So, we return here after an indefinite time... */ |
||
1336 | kern_cli(); |
||
1337 | task->sigmask = oldmask; |
||
1338 | |||
1339 | kern_frestore(f); |
||
1340 | } |
||
1341 | |||
1342 | |||
1343 | /*---------------------------------------------------------------------*/ |
||
1344 | /* S.Ha.R.K. exception handling */ |
||
1345 | /*---------------------------------------------------------------------*/ |
||
1346 | |||
1347 | void kern_raise(int n, PID p) |
||
1348 | { |
||
1349 | union sigval v; |
||
1350 | PID sos; /* temp. PID */ |
||
1351 | |||
1352 | v.sival_int = n; |
||
1353 | // kern_printf("RAISE"); |
||
1354 | |||
1355 | /* sigqueue sets the p field to exec_shadow... so we change it for a |
||
1356 | little while... because sigqueue fills the descriptor with exec_shadow... */ |
||
1357 | kern_cli(); |
||
1358 | sos = exec_shadow; |
||
1359 | exec_shadow = p; |
||
1360 | |||
1361 | active_exc = 1; // see (*) |
||
1362 | sigqueue(0, SIGHEXC, v); |
||
1363 | active_exc = 0; |
||
1364 | |||
1365 | exec_shadow = sos; |
||
1366 | kern_sti(); |
||
1367 | |||
1368 | /* (*) |
||
1369 | when we are in an exception, we don't have to call |
||
1370 | really_deliver_signal. |
||
1371 | For example, when the capacity of a task is exhausted, an event is |
||
1372 | called. This event simply calls the scheduler, which calls the task_epilogue. |
||
1373 |||
||
1374 | The task_epilogue checks the capacity and raises an exception, BUT |
||
1375 | we don't have to deliver this exception immediately. |
||
1376 |||
||
1377 | Why? Because the task pointed to by exec_shadow was extracted from the |
||
1378 | ready queue (as sigqueue normally does...) and the exception does not have |
||
1379 | to be delivered to that task. It must be delivered |
||
1380 | only after we exit from kern_raise (because the signal handler |
||
1381 | for SIGHEXC may be long and another timer interrupt can fire...), to |
||
1382 | another task... |
||
1384 | |||
1385 | } |
||
1386 | |||
1387 | |||
1388 | /*---------------------------------------------------------------------*/ |
||
1389 | /* S.Ha.R.K. interrupt handling */ |
||
1390 | /*---------------------------------------------------------------------*/ |
||
1391 | |||
1392 | /*----------------------------------------------------------------------*/ |
||
1393 | /* Interrupt table management. The following function installs the fast */ |
||
1394 | /* handler and the sporadic task linked to the interrupt no. */ |
||
1395 | /* If the fast parameter is NULL, no handler is called. */ |
||
1396 | /* If the pi parameter is NIL, no task is installed */ |
||
1397 | /*----------------------------------------------------------------------*/ |
||
1398 | |||
1399 | /* Interrupt handling table */ |
||
1400 | static struct int_des { |
||
1401 | void (*fast)(int n); |
||
1402 | PID proc_index; |
||
1403 | BYTE isUsed; |
||
1404 | } int_table[16]; |
||
1405 | |||
1406 | /* Warning: the interrupt can cause a preemption! */ |
||
1407 | /* The fast handler is a standard piece of code which runs with */ |
||
1408 | /* interrupts enabled to allow interrupt nesting */ |
||
1409 | |||
1410 | void irq_fasthandler(void *n) |
||
1411 | { |
||
1412 | int no = *(int *)n; |
||
1413 | PID p; |
||
1414 | |||
1415 | /* tracer stuff */ |
||
1416 | trc_logevent(TRC_INTR,&no); |
||
1417 | |||
1418 | if (int_table[no].fast != NULL) { |
||
1419 | kern_sti(); |
||
1420 | (int_table[no].fast)(no); |
||
1421 | kern_cli(); |
||
1422 | } |
||
1423 | |||
1424 | /* If a sporadic process is linked, activate it */ |
||
1425 | p = int_table[no].proc_index; |
||
1426 | task_activate(p); // no problem if p == nil |
||
1427 | } |
||
1428 | |||
1429 | /*----------------------------------------------------------------------*/ |
||
1430 | /* Interrupt table management. The following function installs the fast */ |
||
1431 | /* handler and the sporadic task linked to the interrupt no. */ |
||
1432 | /* If the fast parameter is NULL, no handler is called. */ |
||
1433 | /* If the pi parameter is NIL, no task is installed */ |
||
1434 | /*----------------------------------------------------------------------*/ |
||
1435 | int handler_set(int no, void (*fast)(int n), PID pi) |
||
1436 | { |
||
1437 | SYS_FLAGS f; |
||
1438 | |||
1439 | if ((no < 1) || (no > 15)) { |
||
1440 | errno = EWRONG_INT_NO; |
||
1441 | return -1; |
||
1442 | } |
||
1443 | |||
1444 | f = kern_fsave(); |
||
1445 | //kern_printf("handler_set: no %d pid %d\n",no, pi); |
||
1446 | if (int_table[no].isUsed == TRUE) { |
||
1447 | kern_frestore(f); |
||
1448 | errno = EUSED_INT_NO; |
||
1449 | return -1; |
||
1450 | } |
||
1451 | int_table[no].fast = fast; |
||
1452 | int_table[no].proc_index = pi; |
||
1453 | int_table[no].isUsed = TRUE; |
||
1454 | |||
1455 | irq_bind(no, irq_fasthandler, INT_FORCE); |
||
1456 | irq_unmask(no); |
||
1457 | kern_frestore(f); |
||
1458 | |||
1459 | return 1; |
||
1460 | } |
||
1461 | |||
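A sketch of registering a fast handler on a PIC interrupt line, following the comment above (a NULL fast handler means nothing is called, NIL means no sporadic task is attached). The IRQ number, the handler body, and the assumption that `<kernel/func.h>` exports the handler_set() prototype and NIL are all illustrative.

```c
#include <kernel/func.h>

/* Fast handler: irq_fasthandler() calls it with interrupts re-enabled,
   so keep the body short. */
static void net_irq_fast(int no)
{
  (void)no;
}

/* Bind IRQ 5 to the fast handler only, with no sporadic task attached. */
int bind_net_irq(void)
{
  if (handler_set(5, net_irq_fast, NIL) == -1)
    return -1;   /* errno is EWRONG_INT_NO or EUSED_INT_NO */

  return 0;
}
```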
1462 | int handler_remove(int no) |
||
1463 | { |
||
1464 | SYS_FLAGS f; |
||
1465 | return 0; /* NOTE: this early return disables the removal code below */ |
||
1466 | |||
1467 | if (no < 1 || no > 15) { |
||
1468 | errno = EWRONG_INT_NO; |
||
1469 | return -1; |
||
1470 | } |
||
1471 | |||
1472 | f = kern_fsave(); |
||
1473 | if (int_table[no].isUsed == FALSE) { |
||
1474 | kern_frestore(f); |
||
1475 | errno = EUNUSED_INT_NO; |
||
1476 | return -1; |
||
1477 | } |
||
1478 | |||
1479 | int_table[no].fast = NULL; |
||
1480 | int_table[no].proc_index = NIL; |
||
1481 | int_table[no].isUsed = FALSE; |
||
1482 | |||
1483 | irq_bind(no,NULL, INT_PREEMPTABLE); |
||
1484 | irq_mask(no); |
||
1485 | kern_frestore(f); |
||
1486 | |||
1487 | return 1; |
||
1488 | |||
1489 | } |
||
1490 | |||
1491 | /* this is the test that is done when a task is being killed |
||
1492 | and it is waiting on a sigwait */ |
||
1493 | static int signal_cancellation_point(PID i, void *arg) |
||
1494 | { |
||
1495 | LEVEL l; |
||
1496 | |||
1497 | if (proc_table[i].status == WAIT_SIG) { |
||
1498 | |||
1499 | if (proc_table[i].delay_timer != -1) { |
||
1500 | event_delete(proc_table[i].delay_timer); |
||
1501 | proc_table[i].delay_timer = -1; |
||
1502 | } |
||
1503 | |||
29 | pj | 1504 | iq_extract(i, &sigwaiters); |
2 | pj | 1505 | |
1506 | l = proc_table[i].task_level; |
||
1507 | level_table[l]->task_insert(l,i); |
||
1508 | |||
1509 | return 1; |
||
1510 | } |
||
1511 | else if (proc_table[i].status == WAIT_SIGSUSPEND) { |
||
1512 | |||
1513 | l = proc_table[i].task_level; |
||
1514 | level_table[l]->task_insert(l,i); |
||
1515 | |||
1516 | return 1; |
||
1517 | } |
||
1518 | |||
1519 | |||
1520 | return 0; |
||
1521 | } |
||
1522 | |||
1523 | void signals_init() |
||
1524 | { |
||
1525 | int i; |
||
1526 | |||
1527 | /* Initialize the default signal actions and the signal |
||
1528 | queue headers. */ |
||
1529 | for (i = 0; i < SIG_MAX; i++) { |
||
1530 | sigactions[i].sa_handler = SIG_DFL; |
||
1531 | sigactions[i].sa_flags = 0; |
||
1532 | sigactions[i].sa_mask = 0; |
||
1533 | sigactions[i].sa_sigaction = 0; |
||
1534 | sigqueued[i] = -1; |
||
1535 | } |
||
1536 | |||
1537 | /* Initialize the signal queue */ |
||
1538 | for (i=0; i < SIGQUEUE_MAX-1; i++) { |
||
1539 | sig_queue[i].next = i+1; |
||
1540 | sig_queue[i].flags = 0; |
||
1541 | } |
||
1542 | sig_queue[SIGQUEUE_MAX-1].next = NIL; |
||
1543 | sig_queue[SIGQUEUE_MAX-1].flags = 0; |
||
1544 | sigqueue_free = 0; |
||
1545 | |||
1546 | procsigpending = 0; |
||
1547 | |||
29 | pj | 1548 | iq_init(&sigwaiters, &freedesc, 0); |
2 | pj | 1549 | alarm_timer = -1; |
1550 | |||
1551 | /* Interrupt handling init */ |
||
1552 | for (i=0; i<16; i++) { |
||
1553 | int_table[i].fast = NULL; |
||
1554 | int_table[i].proc_index = NIL; |
||
1555 | int_table[i].isUsed = FALSE; |
||
1556 | } |
||
1557 | |||
1558 | register_cancellation_point(signal_cancellation_point, NULL); |
||
1559 | } |
||
1560 | |||
1561 |