/*
 * Recovered from an SVN web-viewer export (rev 422, author giacomo).
 * The viewer's navigation links and "Rev | Author | Line No. | Line"
 * table header carried no code content and were reduced to this note.
 * NOTE(review): guard name and fields suggest this is the i386
 * <asm/hardirq.h> from a 2.6-era Linux tree — confirm against the repo.
 */
422 | giacomo | 1 | #ifndef __ASM_HARDIRQ_H |
2 | #define __ASM_HARDIRQ_H |
||
3 | |||
4 | #include <linux/config.h> |
||
5 | #include <linux/threads.h> |
||
6 | #include <linux/irq.h> |
||
7 | |||
8 | typedef struct { |
||
9 | unsigned int __softirq_pending; |
||
10 | unsigned long idle_timestamp; |
||
11 | unsigned int __nmi_count; /* arch dependent */ |
||
12 | unsigned int apic_timer_irqs; /* arch dependent */ |
||
13 | } ____cacheline_aligned irq_cpustat_t; |
||
14 | |||
15 | #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ |
||
16 | |||
17 | /* |
||
18 | * We put the hardirq and softirq counter into the preemption |
||
19 | * counter. The bitmask has the following meaning: |
||
20 | * |
||
21 | * - bits 0-7 are the preemption count (max preemption depth: 256) |
||
22 | * - bits 8-15 are the softirq count (max # of softirqs: 256) |
||
23 | * - bits 16-23 are the hardirq count (max # of hardirqs: 256) |
||
24 | * |
||
25 | * - ( bit 26 is the PREEMPT_ACTIVE flag. ) |
||
26 | * |
||
27 | * PREEMPT_MASK: 0x000000ff |
||
28 | * SOFTIRQ_MASK: 0x0000ff00 |
||
29 | * HARDIRQ_MASK: 0x00ff0000 |
||
30 | */ |
||
31 | |||
32 | #define PREEMPT_BITS 8 |
||
33 | #define SOFTIRQ_BITS 8 |
||
34 | #define HARDIRQ_BITS 8 |
||
35 | |||
36 | #define PREEMPT_SHIFT 0 |
||
37 | #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) |
||
38 | #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) |
||
39 | |||
40 | #define __MASK(x) ((1UL << (x))-1) |
||
41 | |||
42 | #define PREEMPT_MASK (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT) |
||
43 | #define HARDIRQ_MASK (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) |
||
44 | #define SOFTIRQ_MASK (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) |
||
45 | |||
46 | #define hardirq_count() (preempt_count() & HARDIRQ_MASK) |
||
47 | #define softirq_count() (preempt_count() & SOFTIRQ_MASK) |
||
48 | #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK)) |
||
49 | |||
50 | #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) |
||
51 | #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) |
||
52 | #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) |
||
53 | |||
54 | /* |
||
55 | * The hardirq mask has to be large enough to have |
||
56 | * space for potentially all IRQ sources in the system |
||
57 | * nesting on a single CPU: |
||
58 | */ |
||
59 | #if (1 << HARDIRQ_BITS) < NR_IRQS |
||
60 | # error HARDIRQ_BITS is too low! |
||
61 | #endif |
||
62 | |||
63 | /* |
||
64 | * Are we doing bottom half or hardware interrupt processing? |
||
65 | * Are we in a softirq context? Interrupt context? |
||
66 | */ |
||
67 | #define in_irq() (hardirq_count()) |
||
68 | #define in_softirq() (softirq_count()) |
||
69 | #define in_interrupt() (irq_count()) |
||
70 | |||
71 | |||
72 | #define hardirq_trylock() (!in_interrupt()) |
||
73 | #define hardirq_endlock() do { } while (0) |
||
74 | |||
75 | #define irq_enter() (preempt_count() += HARDIRQ_OFFSET) |
||
76 | #define nmi_enter() (irq_enter()) |
||
77 | #define nmi_exit() (preempt_count() -= HARDIRQ_OFFSET) |
||
78 | |||
79 | #ifdef CONFIG_PREEMPT |
||
80 | # include <linux/smp_lock.h> |
||
81 | # define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked()) |
||
82 | # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1) |
||
83 | #else |
||
84 | # define in_atomic() (preempt_count() != 0) |
||
85 | # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET |
||
86 | #endif |
||
87 | #define irq_exit() \ |
||
88 | do { \ |
||
89 | preempt_count() -= IRQ_EXIT_OFFSET; \ |
||
90 | if (!in_interrupt() && softirq_pending(smp_processor_id())) \ |
||
91 | do_softirq(); \ |
||
92 | preempt_enable_no_resched(); \ |
||
93 | } while (0) |
||
94 | |||
95 | #ifndef CONFIG_SMP |
||
96 | # define synchronize_irq(irq) barrier() |
||
97 | #else |
||
98 | extern void synchronize_irq(unsigned int irq); |
||
99 | #endif /* CONFIG_SMP */ |
||
100 | |||
101 | #endif /* __ASM_HARDIRQ_H */ |