/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <asm/atomic.h>
#include <asm/hardirq.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * For 2.4.x compatibility, 2.4.x can use
 *
 *	typedef void irqreturn_t;
 *	#define IRQ_NONE
 *	#define IRQ_HANDLED
 *	#define IRQ_RETVAL(x)
 *
 * to mix old-style and new-style irq handler returns.
 *
 * IRQ_NONE means we didn't handle it.
 * IRQ_HANDLED means that we did have a valid interrupt and handled it.
 * IRQ_RETVAL(x) selects between the two depending on x (non-zero means handled).
 */
typedef int irqreturn_t;

#define IRQ_NONE	(0)
#define IRQ_HANDLED	(1)
#define IRQ_RETVAL(x)	((x) != 0)
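
/*
 * Usage sketch (not part of the original header; my_dev, my_dev_pending()
 * and my_dev_ack() are hypothetical driver helpers): a handler reports
 * whether its device really raised the line by feeding a zero/non-zero
 * value to IRQ_RETVAL(), which evaluates to IRQ_HANDLED or IRQ_NONE.
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		struct my_dev *dev = dev_id;
 *		int pending = my_dev_pending(dev);
 *
 *		if (pending)
 *			my_dev_ack(dev);
 *		return IRQ_RETVAL(pending);
 *	}
 */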

struct irqaction {
	irqreturn_t (*handler)(int, void *, struct pt_regs *);
	unsigned long flags;
	unsigned long mask;
	const char *name;
	void *dev_id;
	struct irqaction *next;
};

extern irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs);
extern int request_irq(unsigned int,
		       irqreturn_t (*handler)(int, void *, struct pt_regs *),
		       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);
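
/*
 * Registration sketch (not part of the original header; my_handler and
 * struct my_dev are the hypothetical names from the sketch above, and
 * SA_SHIRQ is the shared-interrupt flag of this kernel generation).
 * The dev_id passed to free_irq() must match the one given to request_irq():
 *
 *	int err = request_irq(dev->irq, my_handler, SA_SHIRQ, "my_dev", dev);
 *	if (err)
 *		return err;
 *	...
 *	free_irq(dev->irq, dev);
 */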

/*
 * Temporary defines for UP kernels, until all code gets fixed.
 */
#ifndef CONFIG_SMP
# define cli()			local_irq_disable()
# define sti()			local_irq_enable()
# define save_flags(x)		local_save_flags(x)
# define restore_flags(x)	local_irq_restore(x)
# define save_and_cli(x)	local_irq_save(x)
#endif

/* SoftIRQ primitives. */
#define local_bh_disable() \
		do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
#define __local_bh_enable() \
		do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)

extern void local_bh_enable(void);
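
/*
 * Usage sketch (not part of the original header; "counters" stands for any
 * per-CPU data that is also touched from softirq context): local_bh_disable()
 * keeps softirqs from running on this CPU while the update is in flight, and
 * local_bh_enable() re-enables them and lets any pending ones run:
 *
 *	local_bh_disable();
 *	counters[smp_processor_id()]++;
 *	local_bh_enable();
 */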

/* PLEASE, avoid allocating new softirqs unless you really need very
   high frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	SCSI_SOFTIRQ,
	TASKLET_SOFTIRQ
};

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
	void	*data;
};

asmlinkage void do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
extern void FASTCALL(raise_softirq(unsigned int nr));
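
/*
 * Usage sketch (not part of the original header; my_softirq_action() is a
 * hypothetical handler and MY_SOFTIRQ a hypothetical slot in the enum above).
 * A softirq is registered once, typically from softirq_init(), and is then
 * raised whenever there is work; do_softirq() later calls the handler with
 * the registered data pointer:
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action, NULL);
 *	...
 *	raise_softirq(MY_SOFTIRQ);
 */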

#ifndef invoke_softirq
#define invoke_softirq() do_softirq()
#endif

/* Tasklets --- multithreaded analogue of BHs.

   Main feature differing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature differing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, the tasklet is guaranteed
     to be executed on some CPU at least once afterwards.
   * If the tasklet is already scheduled but its execution has not
     started yet, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets. If the client needs intertask
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
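
/*
 * Usage sketch (not part of the original header; my_tasklet_func() is a
 * hypothetical function). DECLARE_TASKLET() defines a tasklet that is ready
 * to run; tasklet_schedule(), declared further below and typically called
 * from an interrupt handler, marks it pending so the softirq machinery
 * executes it once, on one CPU:
 *
 *	static void my_tasklet_func(unsigned long data);
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_func, 0);
 *	...
 *	tasklet_schedule(&my_tasklet);
 */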

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
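
/*
 * Usage sketch (not part of the original header; my_dev and my_tasklet_func()
 * are the hypothetical names used above). A tasklet embedded in a dynamically
 * allocated structure is set up with tasklet_init(); tasklet_disable() and
 * tasklet_enable() bracket sections that must not race with it, and
 * tasklet_kill() waits for a scheduled instance to finish before teardown:
 *
 *	tasklet_init(&dev->tasklet, my_tasklet_func, (unsigned long)dev);
 *	...
 *	tasklet_disable(&dev->tasklet);
 *	... code that must not run concurrently with the tasklet ...
 *	tasklet_enable(&dev->tasklet);
 *	...
 *	tasklet_kill(&dev->tasklet);
 */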

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 *
 * (A sketch following these steps appears after the declarations below.)
 */
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
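
/*
 * Probing sketch following the steps above (not part of the original header;
 * my_dev_mask_irq(), my_dev_trigger_irq() and my_dev_ack() are hypothetical
 * device helpers, and the delay stands in for "wait for the interrupt"):
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	my_dev_mask_irq(dev);		step 1: quiesce the device
 *	sti();				step 2: allow interrupts
 *	irqs = probe_irq_on();		step 3: claim idle IRQ lines
 *	my_dev_trigger_irq(dev);	step 4: make the device interrupt
 *	udelay(100);			step 5: give it time to fire
 *	irq = probe_irq_off(irqs);	step 6: 0 = none, <0 = several
 *	my_dev_ack(dev);		step 7: clear the pending interrupt
 *
 *	if (irq <= 0)
 *		... the probe failed or was ambiguous ...
 */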

#endif