/*
 * Project: S.Ha.R.K.
 *
 * Coordinators:
 *   Giorgio Buttazzo    <giorgio@sssup.it>
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *
 * Authors     :
 *   Paolo Gai           <pj@gandalf.sssup.it>
 *   (see the web pages for full authors list)
 *
 * ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
 *
 * http://www.sssup.it
 * http://retis.sssup.it
 * http://shark.sssup.it
 */

/**
 ------------
 CVS :        $Id: signal.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $

 File:        $File$
 Revision:    $Revision: 1.1.1.1 $
 Last update: $Date: 2002-03-29 14:12:52 $
 ------------

 This file contains:

 Signal Handling

 - Data structures
 - sigset_t handling functions

 **/

/*
 * Copyright (C) 2000 Paolo Gai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Some functions are inspired by the implementation of the OSKit...
 *
 * Copyright (c) 1997, 1998, 1999 University of Utah and the Flux Group.
 * All rights reserved.
 *
 * [...] The OSKit is free software, also known
 * as "open source;" you can redistribute it and/or modify it under the terms
 * of the GNU General Public License (GPL), version 2, as published by the Free
 * Software Foundation (FSF).  To explore alternate licensing terms, contact
 * the University of Utah at csl-dist@cs.utah.edu or +1-801-585-3271.
 *
 * The OSKit is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE.  See the GPL for more details.  You should have
 * received a copy of the GPL along with the OSKit; see the file COPYING.  If
 * not, write to the FSF, 59 Temple Place #330, Boston, MA 02111-1307, USA.
 */


#include <ll/ll.h>
#include <ll/stdlib.h>
#include <ll/stdio.h>
#include <ll/i386/pic.h>
#include <signal.h>
#include <errno.h>
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
#include <kernel/trace.h>

/* look at nanoslp.c */
int nanosleep_interrupted_by_signal(PID i);


/*---------------------------------------------------------------------*/
/* Data structures */
/*---------------------------------------------------------------------*/

/*+ A flag, see kern_raise +*/
static int active_exc = 0;

/*+ The signal table... +*/
static struct sigaction sigactions[SIG_MAX];

/*+ There is a global (or "process") set of pending signals.
    kill() and sigqueue() affect the process pending set.
+*/
static sigset_t procsigpending;

/*
 * A queue of all the threads waiting in sigwait.
 * It is used also by task_kill...
 */
static QUEUE sigwaiters;


/*+ An array of queues of pending signals posted with sigqueue(). +*/
static SIGQ sigqueued[SIG_MAX];

/*+ We avoid malloc in interrupt handlers by preallocating the queue
    entries used by sigqueued above.
    It is used also in kernel/time.c +*/
SIGQ sigqueue_free;

/*+ this is the signal queue... +*/
sig_queue_entry sig_queue[SIGQUEUE_MAX];

/*+ alarm stuff +*/
static struct timespec alarm_time;
static int alarm_timer;


/* returns the index of the first (least significant) set bit;
   returns 0 also when no bit is set... */
static int ffs(int value)
{
  int x;

  for (x = 0; value; x++, value = value >> 1)
    if (value & 1)
      return x;
  return 0;
}

/*---------------------------------------------------------------------*/
/* interruptable function registration... */
/*---------------------------------------------------------------------*/


/*+ this structure contains the functions to be called to test if a
    task is blocked on a cancellation point +*/
static struct {
  int (*test)(PID p, void *arg);
  void *arg;
} interruptable_table[MAX_SIGINTPOINTS];

static int interruptable_points = 0;


/*+ This function registers a cancellation point with the system.
    Be careful!!! No checks are performed... +*/
void register_interruptable_point(int (*func)(PID p, void *arg), void *arg)
{
  interruptable_table[interruptable_points].test = func;
  interruptable_table[interruptable_points].arg  = arg;
  interruptable_points++;
}
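
/*
 * Illustrative sketch (not part of this file): a blocking primitive can
 * register a test callback at init time, so that a signal directed to a
 * task blocked on it gets a chance to interrupt the wait (nanoslp.c does
 * this).  The callback name and the WAIT_MYPRIMITIVE status below are
 * hypothetical.
 *
 *   static int myprimitive_interrupted(PID p, void *arg)
 *   {
 *     if (proc_table[p].status != WAIT_MYPRIMITIVE)
 *       return 0;
 *     // wake the task up here, then report that the point fired
 *     return 1;
 *   }
 *
 *   // at init time:
 *   register_interruptable_point(myprimitive_interrupted, NULL);
 */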

static void test_interruptable_points(PID i)
{
  int j;

  /* check if the task is blocked on a cancellation point */
  for (j = 0; j < interruptable_points; j++)
    if (interruptable_table[j].test(i, interruptable_table[j].arg))
      break;
}


/*---------------------------------------------------------------------*/
/* sigset_t handling functions */
/*---------------------------------------------------------------------*/

/* These functions will soon become macros... */
int sigemptyset(sigset_t *set)
{
  *set = 0;

  return 0;
}

int sigfillset(sigset_t *set)
{
  *set = 0xFFFFFFFFUL;

  return 0;
}

int sigaddset(sigset_t *set, int signo)
{
  if (signo < 0 || signo >= SIG_MAX) {
    errno = EINVAL;
    return -1;
  }

  *set |= 1 << signo;
  return 0;
}


int sigdelset(sigset_t *set, int signo)
{
  if (signo < 0 || signo >= SIG_MAX) {
    errno = EINVAL;
    return -1;
  }

  *set &= ~(1 << signo);
  return 0;
}

int sigismember(const sigset_t *set, int signo)
{
  if (signo < 0 || signo >= SIG_MAX) {
    errno = EINVAL;
    return -1;
  }

  return *set & (1 << signo);
}
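
/*
 * Illustrative sketch (not part of the kernel): building and testing a
 * signal mask with the helpers above.  SIGALRM is only an example; any
 * signal number below SIG_MAX works.
 *
 *   sigset_t mask;
 *
 *   sigemptyset(&mask);
 *   sigaddset(&mask, SIGALRM);
 *   if (sigismember(&mask, SIGALRM))
 *     ;  // the bit for SIGALRM is set
 *   sigdelset(&mask, SIGALRM);
 */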


/*---------------------------------------------------------------------*/
/* Finally, the public functions */
/*---------------------------------------------------------------------*/

/*
 * Prototypes.
 */
void really_deliver_signal(int sig, siginfo_t *code);
void kern_deliver_async_signal(int sig);
void kern_deliver_process_signal(int sig);

int task_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
  proc_des *task;      /* current executing task... */
  int err = 0;

  kern_cli();

  task = &proc_table[exec_shadow];

  if (oset)
    *oset = task->sigmask;

  if (set) {
    switch (how) {
      case SIG_BLOCK:
        task->sigmask |= *set;
        break;
      case SIG_UNBLOCK:
        task->sigmask &= ~*set;
        break;
      case SIG_SETMASK:
        task->sigmask = *set;
        break;
      default:
        err = EINVAL;
    }
  }

  /*
   * Look for process pending signals that are unblocked, and deliver.
   */
  while (procsigpending & ~task->sigmask) {
    int sig = ffs(procsigpending & ~task->sigmask);
    kern_deliver_process_signal(sig);
  }

  /*
   * Look for task pending signals that are unblocked, and deliver.
   */
  while (task->sigpending & ~task->sigmask) {
    int sig = ffs(task->sigpending & ~task->sigmask);
    kern_deliver_async_signal(sig);
  }

  kern_sti();
  return err;
}

/*
 * This can be called out of an interrupt handler, say from an alarm
 * expiration.
 */
int
task_signal(PID p, int signo)
{
  // int enabled;

  /* Error check? Sure! */
  if (!signo)
    return 0;

  if (signo < 0 || signo >= SIG_MAX)
    return EINVAL;

  if (proc_table[p].status == FREE)
    return EINVAL;

  kern_cli();

  /*
   * Look at the process sigactions.  If the "process" is ignoring
   * the signal, then the signal is not placed in the pending list.
   */
  if (!(sigactions[signo].sa_flags & SA_SIGINFO) &&
      sigactions[signo].sa_handler == SIG_IGN) {
    kern_sti();
    return 0;
  }

  /*
   * Add the signal to the list of pending signals for the target task.
   */
  sigaddset(&proc_table[p].sigpending, signo);

  /* check for an interruptable function!!! */
  test_interruptable_points(p);

  if (proc_table[p].status == WAIT_SIGSUSPEND) {
    LEVEL l;

    /* Reactivate the task... */
    q_extract(p, &sigwaiters);

    l = proc_table[p].task_level;
    level_table[l]->task_insert(l, p);
  }


  /*
   * If not in an interrupt, use this opportunity to deliver
   * pending unblocked signals to the current thread.
   */
  if (!ll_ActiveInt()) {
    kern_deliver_pending_signals();
  }

  kern_sti();
  return 0;
}

/*
 * sigaction
 */
int
sigaction(int sig, const struct sigaction *act, struct sigaction *oact)
{
  int sos;       /* used to empty the sigqueue... */
  SYS_FLAGS f;


  if (sig < 0 || sig >= SIG_MAX)
    return errno = EINVAL, -1;

  f = kern_fsave();

  if (oact)
    *oact = sigactions[sig];
  if (act)
    sigactions[sig] = *act;

  /*
   * If the action for this signal is being set to SIG_IGN or SIG_DFL,
   * and that signal is process pending, then clear it.
   */
  if (act && !(act->sa_flags & SA_SIGINFO) &&
      (act->sa_handler == SIG_IGN || act->sa_handler == SIG_DFL)) {
    sos = sigqueued[sig];
    while (sos != -1) {
      /* Remove the first entry and put it to the free queue */
      sos = sig_queue[sigqueued[sig]].next;

      if (sig_queue[sigqueued[sig]].flags & USED_FOR_TIMER)
        sig_queue[sigqueued[sig]].flags &= ~SIGNAL_POSTED;
      else {
        sig_queue[sigqueued[sig]].next = sigqueue_free;
        sigqueue_free = sigqueued[sig];
      }

      /* advance the head of the per-signal queue to the next entry saved
         above, so each iteration drains one pending entry */
      sigqueued[sig] = sos;
    }
    sigqueued[sig] = -1;
    sigdelset(&procsigpending, sig);
  }

  kern_frestore(f);
  return 0;
}

/*
 * sigprocmask. this is just task_sigmask
 */
int
sigprocmask(int how, const sigset_t *set, sigset_t *oset)
{
  return task_sigmask(how, set, oset);
}
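
/*
 * Illustrative sketch (not part of the kernel): temporarily blocking a
 * signal around a critical region from task code.  SIGALRM is only an
 * example signal number.
 *
 *   sigset_t block, old;
 *
 *   sigemptyset(&block);
 *   sigaddset(&block, SIGALRM);
 *
 *   sigprocmask(SIG_BLOCK, &block, &old);   // SIGALRM stays pending here
 *   // ... critical region ...
 *   sigprocmask(SIG_SETMASK, &old, NULL);   // pending signals delivered now
 */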

/*
 * raise. this is just task_signal on itself.
 */
int
raise(int sig)
{
  return task_signal(exec_shadow, sig);
}

/*
 * kill.  What does it mean to kill() in a multithreaded program?  The POSIX
 * spec says that a signal sent to a "process" shall be delivered to only
 * one task.  If no task has that signal unblocked, then the first
 * task to unblock the signal is the lucky winner.  Well, that means we
 * need to have a global procsigpending to record process pending signals.
 */
int
kill(pid_t pid, int signo)
{
  PID task;
  PID i;
  SYS_FLAGS f;
  struct sigaction act;

  /* Error check? Sure! */
  if (!signo)
    return 0;

  if (signo < 0 || signo >= SIG_MAX)
    return EINVAL;


  f = kern_fsave();

  act = sigactions[signo];

  if (!(act.sa_flags & SA_SIGINFO) && act.sa_handler == SIG_IGN) {
    kern_frestore(f);
    return 0;
  }

  /*
   * Kill does not queue.  If the signal is already pending, this
   * one is tossed.
   */
  if (sigismember(&procsigpending, signo)) {
    kern_frestore(f);
    return 0;
  }

  /*
   * Make the signal process pending.
   */
  sigaddset(&procsigpending, signo);

  /*
   * Look through the threads in sigwait to see if any of them
   * is waiting for the signal.  This is done as a separate pass
   * since the value of the pthread sigmask is ignored (threads
   * in sigwait will have blocked the signals being waited for).
   */

  for (task = sigwaiters;
       task != NIL;
       task = proc_table[task].next) {
    if (sigismember(&proc_table[task].sigwaiting, signo)) {
      LEVEL l;

      if (proc_table[task].status == WAIT_SIGSUSPEND)
        sigaddset(&proc_table[task].sigpending, signo);

      /* Reactivate the task... */
      q_extract(task, &sigwaiters);
      l = proc_table[task].task_level;
      level_table[l]->task_insert(l, task);

      if (proc_table[task].delay_timer != -1) {
        event_delete(proc_table[task].delay_timer);
        proc_table[task].delay_timer = -1;
      }

      kern_frestore(f);
      return 0;
    }
  }

  /*
   * No threads in sigwait.  Too bad.  Must find another thread to
   * deliver it to.
   */
  for (i = 1; i < MAX_PROC; i++) {
    if (proc_table[i].status != FREE) {
      if (!sigismember(&proc_table[i].sigmask, signo)) {
        /* Add the signal to list of pending
           signals for the target task. */
        sigaddset(&proc_table[i].sigpending, signo);

        /* check for an interruptable function!!! */
        test_interruptable_points(i);
        break;
      }
    }
  }

  /*
   * If not in an interrupt, use this opportunity to deliver
   * pending unblocked signals to the current thread.
   */
  if (!ll_ActiveInt()) {
    kern_deliver_pending_signals();
  }

  kern_frestore(f);
  return 0;
}
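
/*
 * Illustrative sketch (not part of the kernel): installing a handler with
 * sigaction() and then sending the signal to the "process" with kill().
 * The handler name and the use of SIGUSR1 are just examples.
 *
 *   void my_handler(int signo)
 *   {
 *     // react to the signal
 *   }
 *
 *   struct sigaction a;
 *
 *   a.sa_handler = my_handler;
 *   sigemptyset(&a.sa_mask);
 *   a.sa_flags = 0;
 *   sigaction(SIGUSR1, &a, NULL);
 *
 *   kill(0, SIGUSR1);   // delivered to one task with SIGUSR1 unblocked
 */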

/*
 * sigqueue internal: accepts also the SI_XXX values
 */
int
sigqueue_internal(pid_t pid, int signo, const union sigval value, int si_code)
{
  PID task;
  SYS_FLAGS f;
  int i;

  int thingie;   /* an element of the signal queue */
  int sos;       /* used when inserting thingie in
                    the signal queue */
  struct sigaction act;

  /* Error check? Sure! */
  if (!signo)
    return 0;

  if (signo < 0 || signo >= SIG_MAX)
    return EINVAL;


  f = kern_fsave();
  /*
   * Look at the process sigactions.  If the "process" is ignoring
   * the signal, then the signal is not placed in the pending list.
   */
  act = sigactions[signo];

  if (!(act.sa_flags & SA_SIGINFO) && act.sa_handler == SIG_IGN) {
    kern_frestore(f);
    return 0;
  }


  /*
   * If the flags do not include SA_SIGINFO, and there is already
   * a signal pending, this new one is dropped.
   */
  if ((!(act.sa_flags & SA_SIGINFO)) &&
      sigismember(&procsigpending, signo)) {
    kern_frestore(f);
    return 0;
  }

  /*
   * Gotta have space for the new signal.
   */
  if (sigqueue_free == -1) {
    kern_frestore(f);
    return EAGAIN;
  }

  /*
   * Create a queue entry.
   */
  thingie = sigqueue_free;
  sigqueue_free = sig_queue[sigqueue_free].next;

  sig_queue[thingie].info.si_signo = signo;
  sig_queue[thingie].info.si_code  = si_code;
  sig_queue[thingie].info.si_value = value;
  sig_queue[thingie].info.si_task  = exec_shadow;
  sig_queue[thingie].next = -1;

  /*
   * Queue the signal on the process.
   */

  /* we insert the signal at the queue's tail */
  if (sigqueued[signo] == -1)
    sigqueued[signo] = thingie;
  else {
    sos = sigqueued[signo];
    while (sig_queue[sos].next != -1) sos = sig_queue[sos].next;
    sig_queue[sos].next = thingie;
  }
  sigaddset(&procsigpending, signo);

  /*
   * Look through the threads in sigwait to see if any of them
   * is waiting for the signal.  This is done as a separate pass
   * since the value of the pthread sigmask is ignored (threads
   * in sigwait will have blocked the signals being waited for).
   * If we find one, wake up that thread.  Note that POSIX says that
   * if multiple threads are sigwaiting for the same signal number,
   * exactly one thread is woken up.  The problem is how to maintain
   * the FIFO order, and how to prevent lost signals in the case that
   * a thread calls sigwait before the woken thread runs and gets it.
   */
  for (task = sigwaiters;
       task != NIL;
       task = proc_table[task].next) {
    if (sigismember(&proc_table[task].sigwaiting, signo)) {
      LEVEL l;

      if (proc_table[task].status == WAIT_SIGSUSPEND)
        sigaddset(&proc_table[task].sigpending, signo);

      /* Reactivate the task... */
      q_extract(task, &sigwaiters);

      l = proc_table[task].task_level;
      level_table[l]->task_insert(l, task);

      if (proc_table[task].delay_timer != -1) {
        event_delete(proc_table[task].delay_timer);
        proc_table[task].delay_timer = -1;
      }

      kern_frestore(f);
      return 0;

    }
  }

  /*
   * Need to find a thread to deliver the signal to.  Look for the
   * first thread that is not blocking the signal, and send it the
   * signal.  It is my opinion that any program that is using sigwait,
   * and has not blocked signals in all of its threads, is bogus.  The
   * same is true if the program is not using sigwait, and has the
   * signal unblocked in more than one thread.
   * Why?  You might wake up a thread, but not have an actual queue
   * entry left by the time it runs again and looks, since another
   * thread could call sigwait and get that queue entry, or if there
   * are multiple threads that can take the signal, one thread could
   * get all the entries.  This could result in an interrupted thread,
   * but with no signal to deliver.  Well, not much to do about it.
   * Let's just queue the signal for the process, and let the chips
   * fall where they may.
   */
  for (i = 1; i < MAX_PROC; i++) {
    if (proc_table[i].status != FREE) {
      if (!sigismember(&proc_table[i].sigmask, signo)) {
        /* Add the signal to the list of pending
           signals for the target task. */
        sigaddset(&proc_table[i].sigpending, signo);

        /* check for an interruptable function!!! */
        test_interruptable_points(i);

        break;
      }
    }
  }

  /*
   * If not in an interrupt, use this opportunity to deliver
   * pending unblocked signals to the current thread.
   * (NB: a discussion on the flag active_exc is near the function
   * kern_raise() )
   */
  if (!ll_ActiveInt() && active_exc == 0) {
    kern_deliver_pending_signals();
  }

  kern_frestore(f);
  return 0;
}
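
/*
 * Illustrative sketch (not part of the kernel): posting a queued signal
 * carrying a value.  This assumes that the public sigqueue() wrapper
 * (defined elsewhere, and used below by kern_raise()) forwards to
 * sigqueue_internal() with an SI_QUEUE code; SIGUSR1 is just an example.
 *
 *   union sigval v;
 *
 *   v.sival_int = 42;
 *   sigqueue(0, SIGUSR1, v);   // a handler installed with SA_SIGINFO
 *                              // receives 42 in info->si_value.sival_int
 */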

static void sigwait_timer(void *arg)
{
  PID p = (PID)arg;
  LEVEL l;

  /* reset the event timer */
  proc_table[p].delay_timer = -1;

  /* set the timeout flag */
  proc_table[p].control |= SIGTIMEOUT_EXPIRED;

  /* insert the task into the ready queue and extract it from the waiters */
  q_extract(p, &sigwaiters);

  l = proc_table[p].task_level;
  level_table[l]->task_insert(l, p);

  event_need_reschedule();
}

/*
 * Sigwait.  Sigwait overrides the state of the pthread sigmask and the global
 * sigactions.  The caller *must* block the set of signals in "set" before
 * calling sigwait, otherwise the behaviour is undefined (which means that
 * the caller will take an async signal anyway, and sigwait will return EINTR).
 */
int
kern_sigwait_internal(const sigset_t *set,
                      siginfo_t *info, const struct timespec *timeout)
{
  proc_des *pthread = &proc_table[exec_shadow];
  int thissig;

  struct timespec ty;
  TIME tx;
  LEVEL l;

  task_testcancel();

  /* siglock and pthread siglock are taken from an interrupt handler */
  kern_cli();

  /*
   * First check for process pending signals.  Must take and hold
   * the global siglock to prevent races with kill() and sigqueue().
   */
  if (procsigpending & *set) {
    int sos;

    thissig = ffs(procsigpending & *set);

    /*
     * Sent with kill().  Using sigwait and kill is Bogus!
     */
    if (sigqueued[thissig] == -1) {
      info->si_signo = thissig;
      info->si_code = SI_USER;
      info->si_value.sival_int = 0;

      sigdelset(&pthread->sigpending, thissig);
      sigdelset(&procsigpending, thissig);
      kern_sti();
      return 0;
    }

    /*
     * Grab the first queue entry.
     */
    sos = sigqueued[thissig];
    sigqueued[thissig] = sig_queue[sigqueued[thissig]].next;

    /*
     * If that was the last one, reset the process procsigpending.
     */
    if (sigqueued[thissig] == -1)
      sigdelset(&procsigpending, thissig);
    sigdelset(&pthread->sigpending, thissig);

    /*
     * Copy the information and free the queue entry.
     */
    info->si_signo = sig_queue[sos].info.si_signo;
    info->si_code = sig_queue[sos].info.si_code;
    info->si_value.sival_int = sig_queue[sos].info.si_value.sival_int;

    if (sig_queue[sos].flags & USED_FOR_TIMER)
      sig_queue[sos].flags &= ~SIGNAL_POSTED;
    else {
      sig_queue[sos].next = sigqueue_free;
      sigqueue_free = sos;
    }
    kern_sti();
    return 0;
  }

  /*
   * Now check for pthread pending signals.
   */
  if (pthread->sigpending & *set) {
    thissig = ffs(pthread->sigpending & *set);
    info->si_signo = thissig;
    info->si_code = SI_USER;
    info->si_value.sival_int = 0;
    sigdelset(&pthread->sigpending, thissig);
    kern_sti();
    return 0;
  }

  /*
   * For timed wait, if nothing is available and the timeout value
   * is zero, it's an error.
   */
  if (timeout && timeout->tv_sec == 0 && timeout->tv_nsec == 0) {
    kern_sti();
    return EAGAIN;
  }

  /*
   * Grab the wait lock and set the sigwaiting mask.  Once that is done,
   * release the thread siglock; Another thread can try and wake this
   * thread up as a result of seeing it in sigwait, but the actual
   * wakeup will be delayed until the waitlock is released in the switch
   * code.
   */
  pthread->sigwaiting = *set;

  /* now, we really block the task... */
  proc_table[exec_shadow].context = kern_context_save();

  /* SAME AS SCHEDULER... manage the capacity event and the load_info */
  ll_gettime(TIME_EXACT, &schedule_time);
  SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
  tx = TIMESPEC2USEC(&ty);
  proc_table[exec_shadow].avail_time -= tx;
  jet_update_slice(tx);
  if (cap_timer != NIL) {
    event_delete(cap_timer);
    cap_timer = NIL;
  }
  l = proc_table[exec_shadow].task_level;
  level_table[l]->task_extract(l, exec_shadow);

  /*
   * Add this thread to the list of threads in sigwait.  Once that is
   * done, it is safe to release the global siglock, which will allow
   * another thread to scan the sigwaiters list.  As above, it might
   * find a thread in sigwait, but it will not be able to wake it up
   * until the waitlock is released in the switch code.
   */
  q_insertfirst(exec_shadow, &sigwaiters);
  proc_table[exec_shadow].status = WAIT_SIG;

  if (timeout) {
    /* we can use the delay timer because if we are here we are not in a
       task_delay */
    struct timespec t, abstime;
    ll_gettime(TIME_EXACT, &t);
    ADDTIMESPEC(&t, timeout, &abstime);

    proc_table[exec_shadow].delay_timer =
      kern_event_post(&abstime, sigwait_timer, (void *)exec_shadow);
  }

  /* and finally we reschedule */
  exec = exec_shadow = -1;
  scheduler();
  ll_context_to(proc_table[exec_shadow].context);

  task_testcancel();

  pthread->sigwaiting = 0;

  /*
   * Look for timeout.
   */
  if (proc_table[exec_shadow].control & SIGTIMEOUT_EXPIRED) {
    kern_sti();
    return EAGAIN;
  }

  /*
   * Look for a wakeup to deliver a queued signal.  This would come
   * either from kill() or from sigqueue().
   */
  if (procsigpending & *set) {
    int sos;

    thissig = ffs(procsigpending & *set);

    /*
     * Sent with kill().  Using sigwait and kill is Bogus!
     */
    if (sigqueued[thissig] == -1) {
      info->si_signo = thissig;
      info->si_code = SI_USER;
      info->si_value.sival_int = 0;

      sigdelset(&procsigpending, thissig);
      kern_sti();
      return 0;
    }

    /*
     * Grab the first queue entry.
     */
    sos = q_getfirst(&sigqueued[thissig]);

    /*
     * If that was the last one, reset the process procsigpending.
     */
    if (sigqueued[thissig] == -1)
      sigdelset(&procsigpending, thissig);

    /*
     * Copy the information and free the queue entry.
     */
    info->si_signo = sig_queue[sos].info.si_signo;
    info->si_code = sig_queue[sos].info.si_code;
    info->si_value.sival_int = sig_queue[sos].info.si_value.sival_int;

    if (sig_queue[sos].flags & USED_FOR_TIMER)
      sig_queue[sos].flags &= ~SIGNAL_POSTED;
    else {
      sig_queue[sos].next = sigqueue_free;
      sigqueue_free = sos;
    }

    kern_sti();
    return 0;
  }

  /*
   * Well, at the moment I am going to assume that if this thread
   * wakes up, and there is no signal pending in the waitset, the
   * thread wait was interrupted for some other reason.  Return EINTR.
   */
  if (!(pthread->sigpending & *set)) {
    kern_sti();
    return EINTR;
  }

  /*
   * Otherwise, get the first signal and return it.
   */
  thissig = ffs(pthread->sigpending & *set);
  info->si_signo = thissig;
  info->si_code = SI_USER;
  info->si_value.sival_int = 0;
  sigdelset(&pthread->sigpending, thissig);
  kern_sti();
  return 0;
}

/*
 * Sigwait.
 */
int
sigwait(const sigset_t *set, int *sig)
{
  siginfo_t info;
  int rc;

  memset(&info, 0, sizeof(info));

  rc = kern_sigwait_internal(set, &info, 0);

  if (rc)
    return rc;

  *sig = info.si_signo;
  return 0;
}
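
/*
 * Illustrative sketch (not part of the kernel): the canonical sigwait()
 * pattern described in the comment above kern_sigwait_internal(): block
 * the signal first, then wait for it synchronously.  SIGUSR1 is just an
 * example.
 *
 *   sigset_t waitset;
 *   int sig;
 *
 *   sigemptyset(&waitset);
 *   sigaddset(&waitset, SIGUSR1);
 *   sigprocmask(SIG_BLOCK, &waitset, NULL);  // mandatory before sigwait
 *
 *   for (;;) {
 *     if (sigwait(&waitset, &sig) == 0)
 *       ;  // sig == SIGUSR1 here; handle it synchronously
 *   }
 */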

/*
 * Sigwaitinfo.
 */
int
sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
  return kern_sigwait_internal(set, info, 0);
}

/*
 * Sigtimedwait.
 */
int
sigtimedwait(const sigset_t *set,
             siginfo_t *info, const struct timespec *timeout)
{
  if (!timeout)
    return EINVAL;

  return kern_sigwait_internal(set, info, timeout);
}

/*
 * Signal
 */
void (*signal(int signum, void (*handler)(int)))(int)
{
  struct sigaction act, oact;
  int olderrno;
  void (*retvalue)(int);

  act.sa_handler = handler;
  sigemptyset(&act.sa_mask);
  act.sa_flags = 0;

  olderrno = errno;
  if (sigaction(signum, &act, &oact))
    retvalue = SIG_ERR;
  else
    if (oact.sa_flags & SA_SIGINFO)
      retvalue = SIG_ERR;
    else
      retvalue = oact.sa_handler;

  errno = olderrno;

  return retvalue;

}
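
/*
 * Illustrative sketch (not part of the kernel): the ANSI-style wrapper
 * above can be used when the extra control of sigaction() is not needed.
 * The handler name is just an example.
 *
 *   void on_alarm(int signo)
 *   {
 *     // called when SIGALRM is delivered
 *   }
 *
 *   if (signal(SIGALRM, on_alarm) == SIG_ERR)
 *     ;  // the previous action used SA_SIGINFO, or signum was invalid
 */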


/*
 * sigpending
 */
int sigpending(sigset_t *set)
{
  *set = procsigpending | proc_table[exec_shadow].sigpending;
  return 0;
}


/*
 * sigsuspend
 */
int sigsuspend(const sigset_t *set)
{
  proc_des *pthread = &proc_table[exec_shadow];

  struct timespec ty;
  TIME tx;
  LEVEL l;

  task_testcancel();

  kern_cli();

  /*
   * Now check for pthread pending signals.
   */
  if (pthread->sigpending & *set) {
    kern_deliver_pending_signals();
    kern_sti();
    return 0;
  }

  /*
   * Grab the wait lock and set the sigwaiting mask.  Once that is done,
   * release the thread siglock; Another thread can try and wake this
   * thread up as a result of seeing it in sigwait, but the actual
   * wakeup will be delayed until the waitlock is released in the switch
   * code.
   */
  pthread->sigwaiting = *set;

  /* now, we really block the task... */
  proc_table[exec_shadow].context = kern_context_save();

  /* SAME AS SCHEDULER... manage the capacity event and the load_info */
  ll_gettime(TIME_EXACT, &schedule_time);
  SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
  tx = TIMESPEC2USEC(&ty);
  proc_table[exec_shadow].avail_time -= tx;
  jet_update_slice(tx);
  if (cap_timer != NIL) {
    event_delete(cap_timer);
    cap_timer = NIL;
  }
  l = proc_table[exec_shadow].task_level;
  level_table[l]->task_extract(l, exec_shadow);

  q_insertfirst(exec_shadow, &sigwaiters);
  proc_table[exec_shadow].status = WAIT_SIGSUSPEND;

  /* and finally we reschedule */
  exec = exec_shadow = -1;
  scheduler();
  ll_context_to(proc_table[exec_shadow].context);

  task_testcancel();

  /*
   * Well, at the moment I am going to assume that if this thread
   * wakes up, and there is no signal pending in the waitset, the
   * thread wait was interrupted for some other reason.  Return EINTR.
   */
  if (!(pthread->sigpending & *set)) {
    kern_sti();
    return EINTR;
  }

  /*
   * Otherwise, deliver the signals.
   */
  kern_deliver_pending_signals();
  kern_sti();
  return 0;
}


void timer_alarmfire(void *arg)
{
  alarm_timer = -1;

  kill(0, SIGALRM);

  event_need_reschedule();
}

/*
 * alarm
 */
unsigned int alarm(unsigned int seconds)
{
  struct timespec returnvalue, temp;

  kern_cli();

  ll_gettime(TIME_EXACT, &temp);

  if (alarm_timer == -1)
    returnvalue.tv_sec = 0;
  else {
    SUBTIMESPEC(&alarm_time, &temp, &returnvalue);

    event_delete(alarm_timer);
  }

  if (seconds) {
    temp.tv_sec += seconds;
    TIMESPEC_ASSIGN(&alarm_time, &temp);
    alarm_timer = kern_event_post(&temp, timer_alarmfire, NULL);
  }
  else
    alarm_timer = -1;

  kern_sti();

  return returnvalue.tv_sec;
}

int pause(void)
{
  sigset_t set;

  sigfillset(&set);
  return sigsuspend(&set);
}
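
/*
 * Illustrative sketch (not part of the kernel): a one-shot timeout built
 * from alarm() and pause().  SIGALRM is posted for the "process" when the
 * alarm fires, so a handler should be installed for it beforehand.
 *
 *   void on_alarm(int signo) { }   // example handler
 *
 *   signal(SIGALRM, on_alarm);
 *   alarm(2);      // post SIGALRM in about two seconds
 *   pause();       // wait here until a signal is delivered
 */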

/*
 * Internal stuff.
 */

/*
 * Deliver an asynchronous signal.  This must be called with interrupts
 * blocked and the pthread siglock held.
 */
void
kern_deliver_async_signal(int sig)
{
  siginfo_t siginfo;

  siginfo.si_signo = sig;
  siginfo.si_code = SI_USER;
  siginfo.si_value.sival_int = 0;
  siginfo.si_task = exec_shadow;

  really_deliver_signal(sig, &siginfo);
}

/*
 * Deliver a process signal.  This must be called with interrupts
 * blocked and the siglock and pthread siglock held.
 */
void
kern_deliver_process_signal(int sig)
{
  siginfo_t siginfo;
  int thingie;

  /*
   * Sent with kill().  Using sigwait and kill is Bogus!
   */
  if (sigqueued[sig] == -1) {
    siginfo.si_signo = sig;
    siginfo.si_code = SI_USER;
    siginfo.si_value.sival_int = 0;
    siginfo.si_task = exec_shadow;

    sigdelset(&procsigpending, sig);
    goto deliver;
  }

  /*
   * Grab the first queue entry.
   */
  thingie = sigqueued[sig];
  sigqueued[sig] = sig_queue[sigqueued[sig]].next;

  /*
   * If that was the last one, reset the process sigpending.
   */
  if (sigqueued[sig] == -1)
    sigdelset(&procsigpending, sig);

  /*
   * Copy the information and free the queue entry.
   */
  siginfo.si_signo = sig_queue[thingie].info.si_signo;
  siginfo.si_code = sig_queue[thingie].info.si_code;
  siginfo.si_value.sival_int = sig_queue[thingie].info.si_value.sival_int;
  siginfo.si_task = sig_queue[thingie].info.si_task;

  if (sig_queue[thingie].flags & USED_FOR_TIMER)
    sig_queue[thingie].flags &= ~SIGNAL_POSTED;
  else {
    sig_queue[thingie].next = sigqueue_free;
    sigqueue_free = thingie;
  }

deliver:
  really_deliver_signal(sig, &siginfo);

}

/*
 * Deliver any pending signals.  Called out of the context switch code
 * when a task switches in, and there are pending signals.
 *
 * Interrupts are blocked...
 */
void
kern_deliver_pending_signals(void)
{
  proc_des *task;      /* current executing task... */

  task = &proc_table[exec_shadow];

  /* we have to check if the task was descheduled while serving
     signals... if so, the call to this function is useless, because
     the task is already serving them!!! (NB: the task can be
     descheduled because the signal handlers are executed with
     interrupts enabled...) */
  if (task->control & TASK_DOING_SIGNALS)
    return;

  task->control |= TASK_DOING_SIGNALS;

  /*
   * Look for process pending signals that are unblocked, and deliver.
   */
  while (procsigpending & ~task->sigmask) {
    /* NB: the while test should be independent from any local
       variable... because when we process signals there can be
       some context change before we return from
       kern_deliver_pending_signals...
    */
    int sig = ffs(procsigpending & ~task->sigmask);

    /* Call with siglock and thread siglock locked */
    kern_deliver_process_signal(sig);
  }

  /*
   * Now deliver any pthread pending signals that are left.
   * NB: the pthread pending signals are NOT sent via sigqueue!!!
   */
  while (task->sigpending & ~task->sigmask) {
    int sig = ffs(task->sigpending & ~task->sigmask);

    /* Call at splhigh and thread locked */
    kern_deliver_async_signal(sig);
  }
  task->control &= ~TASK_DOING_SIGNALS;
}

/*
 * Actually deliver the signal to the task.  At this point the signal
 * is going to be delivered, so it no longer matters if it is blocked.
 */
void
really_deliver_signal(int sig, siginfo_t *info)
{
  proc_des *task;      /* current executing task... */

  sigset_t sigmask, oldmask;
  struct sigaction act;
  SYS_FLAGS f;

  f = kern_fsave();

  task = &proc_table[exec_shadow];

  act = sigactions[sig];

  //kern_printf("Ci sono!!!flags=%d hand=%d sigaction=%d mask=%d",act.sa_flags,
  //            (int)act.sa_handler, (int)act.sa_sigaction, (int)act.sa_mask);

  /*
   * Ignored?
   */
  if (!(act.sa_flags & SA_SIGINFO) && (act.sa_handler == SIG_IGN ||
                                       act.sa_handler == SIG_ERR))
    return;

  if (!(act.sa_flags & SA_SIGINFO) && act.sa_handler == SIG_DFL) {
    /* Default action for all signals is termination */
    kern_printf("\nSignal number %d...\n", sig);
    if (act.sa_flags & SA_SIGINFO)
      kern_printf("with value : %d\n", info->si_value.sival_int);
    sys_abort(ASIG_DEFAULT_ACTION);
  }

  /*
   * Set the signal mask for calling the handler.
   */
  oldmask = sigmask = task->sigmask;
  sigaddset(&sigmask, sig);
  sigmask |= act.sa_mask;
  sigdelset(&task->sigpending, sig);
  task->sigmask = sigmask;
  kern_sti();

  /*
   * and call the handler ...
   */
  if (act.sa_flags & SA_SIGINFO)
    act.sa_sigaction(sig, info, NULL);
  else
    ((void (*)(int, int, void *))act.sa_handler)
      (sig, info->si_value.sival_int, NULL);

  /* NB: when we pass the kern_cli(), there can be the case that
     an irq (and/or a timer...) fired... and did a context change.
     So, we return here after an indefinite time... */
  kern_cli();
  task->sigmask = oldmask;

  kern_frestore(f);
}


/*---------------------------------------------------------------------*/
/* S.HA.R.K. exceptions handling */
/*---------------------------------------------------------------------*/

void kern_raise(int n, PID p)
{
  union sigval v;
  PID sos;       /* temp. PID */

  v.sival_int = n;
  // kern_printf("RAISE");

  /* sigqueue sets the p field to exec_shadow... so we change exec_shadow
     for a little while, because sigqueue fills the descriptor with
     exec_shadow... */
  kern_cli();
  sos = exec_shadow;
  exec_shadow = p;

  active_exc = 1;   // see (*)
  sigqueue(0, SIGHEXC, v);
  active_exc = 0;

  exec_shadow = sos;
  kern_sti();

  /* (*)
     when we are in an exception, we don't have to call
     really_deliver_signal.
     For example, when the capacity of a task is exhausted, an event is
     called.  This event simply calls the scheduler, which calls the
     task_epilogue.

     The task_epilogue checks the capacity and raises an exception, BUT
     we don't have to deliver this exception immediately.

     Why?  Because the task pointed to by exec_shadow was extracted from
     the ready queue (as sigqueue normally does...) and the exception does
     not have to be delivered to that task.  It must be delivered
     only after we exit from kern_raise (because the signal handler
     in SIGHEXC may be long and another timer interrupt can fire...), to
     another task...
  */

}


/*---------------------------------------------------------------------*/
/* S.Ha.R.K. interrupts handling */
/*---------------------------------------------------------------------*/

/*----------------------------------------------------------------------*/
/* Interrupt table management. The following function installs the fast */
/* handler and the sporadic task linked to the interrupt no. */
/* If the fast parameter is NULL, no handler is called. */
/* If the pi parameter is NIL no task is installed */
/*----------------------------------------------------------------------*/

/* Interrupt handling table */
static struct int_des {
  void (*fast)(int n);
  PID proc_index;
  BYTE isUsed;
} int_table[16];

/* Warning: the interrupt can cause a preemption! */
/* The fast handler is a standard piece of code which runs with */
/* interrupts enabled to allow interrupt nesting */

void irq_fasthandler(void *n)
{
  int no = *(int *)n;
  PID p;

  /* tracer stuff */
  trc_logevent(TRC_INTR, &no);

  if (int_table[no].fast != NULL) {
    kern_sti();
    (int_table[no].fast)(no);
    kern_cli();
  }

  /* If a sporadic process is linked, activate it */
  p = int_table[no].proc_index;
  task_activate(p);   // no problem if p == nil
}

/*----------------------------------------------------------------------*/
/* Interrupt table management. The following function installs the fast */
/* handler and the sporadic task linked to the interrupt no. */
/* If the fast parameter is NULL, no handler is called. */
/* If the pi parameter is NIL no task is installed */
/*----------------------------------------------------------------------*/
int handler_set(int no, void (*fast)(int n), PID pi)
{
  SYS_FLAGS f;

  if ((no < 1) || (no > 15)) {
    errno = EWRONG_INT_NO;
    return -1;
  }

  f = kern_fsave();
  //kern_printf("handler_set: no %d pid %d\n",no, pi);
  if (int_table[no].isUsed == TRUE) {
    kern_frestore(f);
    errno = EUSED_INT_NO;
    return -1;
  }
  int_table[no].fast = fast;
  int_table[no].proc_index = pi;
  int_table[no].isUsed = TRUE;

  irq_bind(no, irq_fasthandler, INT_FORCE);
  irq_unmask(no);
  kern_frestore(f);

  return 1;
}
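
/*
 * Illustrative sketch (not part of the kernel): hooking a fast handler to
 * an IRQ line without attaching a sporadic task.  The IRQ number 5 and the
 * handler name are just examples.
 *
 *   void my_fast_handler(int no)
 *   {
 *     // short, non-blocking work for IRQ "no"
 *   }
 *
 *   if (handler_set(5, my_fast_handler, NIL) == -1)
 *     ;  // errno is EWRONG_INT_NO or EUSED_INT_NO
 */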

int handler_remove(int no)
{
  SYS_FLAGS f;
  /* NB: this early return short-circuits the function, so handler
     removal is currently a no-op and the code below is never reached */
  return 0;

  if (no < 1 || no > 15) {
    errno = EWRONG_INT_NO;
    return -1;
  }

  f = kern_fsave();
  if (int_table[no].isUsed == FALSE) {
    kern_frestore(f);
    errno = EUNUSED_INT_NO;
    return -1;
  }

  int_table[no].fast = NULL;
  int_table[no].proc_index = NIL;
  int_table[no].isUsed = FALSE;

  irq_bind(no, NULL, INT_PREEMPTABLE);
  irq_mask(no);
  kern_frestore(f);

  return 1;

}

/* this is the test that is done when a task is being killed
   and it is waiting on a sigwait */
static int signal_cancellation_point(PID i, void *arg)
{
  LEVEL l;

  if (proc_table[i].status == WAIT_SIG) {

    if (proc_table[i].delay_timer != -1) {
      event_delete(proc_table[i].delay_timer);
      proc_table[i].delay_timer = -1;
    }

    q_extract(i, &sigwaiters);

    l = proc_table[i].task_level;
    level_table[l]->task_insert(l, i);

    return 1;
  }
  else if (proc_table[i].status == WAIT_SIGSUSPEND) {

    l = proc_table[i].task_level;
    level_table[l]->task_insert(l, i);

    return 1;
  }


  return 0;
}

void signals_init()
{
  int i;

  /* Initialize the default signal actions and the signal
     queue headers. */
  for (i = 0; i < SIG_MAX; i++) {
    sigactions[i].sa_handler = SIG_DFL;
    sigactions[i].sa_flags = 0;
    sigactions[i].sa_mask = 0;
    sigactions[i].sa_sigaction = 0;
    sigqueued[i] = -1;
  }

  /* Initialize the signal queue */
  for (i = 0; i < SIGQUEUE_MAX - 1; i++) {
    sig_queue[i].next = i + 1;
    sig_queue[i].flags = 0;
  }
  sig_queue[SIGQUEUE_MAX-1].next = NIL;
  sig_queue[SIGQUEUE_MAX-1].flags = 0;
  sigqueue_free = 0;

  procsigpending = 0;

  sigwaiters = NIL;
  alarm_timer = -1;

  /* Interrupt handling init */
  for (i = 0; i < 16; i++) {
    int_table[i].fast = NULL;
    int_table[i].proc_index = NIL;
    int_table[i].isUsed = FALSE;
  }

  register_cancellation_point(signal_cancellation_point, NULL);
}