#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#define WNOHANG		0x00000001
#define WUNTRACED	0x00000002

#define __WNOTHREAD	0x20000000	/* Don't wait on children of other threads in this group */
#define __WALL		0x40000000	/* Wait on all children, regardless of type */
#define __WCLONE	0x80000000	/* Wait only on non-SIGCHLD children */

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/system.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync);
extern int default_wake_function(wait_queue_t *wait, unsigned mode, int sync);

struct __wait_queue {
        unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
        struct task_struct * task;
        wait_queue_func_t func;
        struct list_head task_list;
};

struct __wait_queue_head {
        spinlock_t lock;
        struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;


/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {                            \
        .task           = tsk,                                          \
        .func           = default_wake_function,                       \
        .task_list      = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)                                    \
        wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                           \
        .lock           = SPIN_LOCK_UNLOCKED,                           \
        .task_list      = { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

static inline void init_waitqueue_head(wait_queue_head_t *q)
{
        q->lock = SPIN_LOCK_UNLOCKED;
        INIT_LIST_HEAD(&q->task_list);
}
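
/*
 * Illustrative sketch, not part of the original header: a wait queue
 * head embedded in a per-device structure cannot use the static
 * DECLARE_WAIT_QUEUE_HEAD() initializer above, so it has to be set up
 * at run time with init_waitqueue_head().  The example_device type and
 * example_device_setup() helper below are hypothetical.
 */
#if 0
struct example_device {
        wait_queue_head_t read_queue;   /* readers sleep here */
        int data_ready;                 /* condition they wait for */
};

static void example_device_setup(struct example_device *dev)
{
        dev->data_ready = 0;
        init_waitqueue_head(&dev->read_queue);
}
#endif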

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
        q->flags = 0;
        q->task = p;
        q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
                                             wait_queue_func_t func)
{
        q->flags = 0;
        q->task = NULL;
        q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
        return !list_empty(&q->task_list);
}
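
/*
 * A common wake-up-side pattern (illustrative only, with a hypothetical
 * wait_queue_head_t q) is to test waitqueue_active() first and skip the
 * wake-up call entirely when nobody is sleeping on the queue:
 *
 *      if (waitqueue_active(&q))
 *              wake_up_interruptible(&q);
 *
 * wake_up_interruptible() is one of the wake_up macros defined below.
 */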

extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
        list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
                                         wait_queue_t *new)
{
        list_add_tail(&new->task_list, &head->task_list);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
                                       wait_queue_t *old)
{
        list_del(&old->task_list);
}

extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));

#define wake_up(x)                      __wake_up((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
#define wake_up_nr(x, nr)               __wake_up((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
#define wake_up_all(x)                  __wake_up((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0)
#define wake_up_all_sync(x)             __wake_up_sync((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0)
#define wake_up_interruptible(x)        __wake_up((x), TASK_INTERRUPTIBLE, 1)
#define wake_up_interruptible_nr(x, nr) __wake_up((x), TASK_INTERRUPTIBLE, nr)
#define wake_up_interruptible_all(x)    __wake_up((x), TASK_INTERRUPTIBLE, 0)
#define wake_up_locked(x)               __wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
#define wake_up_interruptible_sync(x)   __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

#define __wait_event(wq, condition)                                     \
do {                                                                    \
        wait_queue_t __wait;                                            \
        init_waitqueue_entry(&__wait, current);                         \
                                                                        \
        add_wait_queue(&wq, &__wait);                                   \
        for (;;) {                                                      \
                set_current_state(TASK_UNINTERRUPTIBLE);                \
                if (condition)                                          \
                        break;                                          \
                schedule();                                             \
        }                                                               \
        current->state = TASK_RUNNING;                                  \
        remove_wait_queue(&wq, &__wait);                                \
} while (0)

#define wait_event(wq, condition)                                       \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event(wq, condition);                                    \
} while (0)

#define __wait_event_interruptible(wq, condition, ret)                  \
do {                                                                    \
        wait_queue_t __wait;                                            \
        init_waitqueue_entry(&__wait, current);                         \
                                                                        \
        add_wait_queue(&wq, &__wait);                                   \
        for (;;) {                                                      \
                set_current_state(TASK_INTERRUPTIBLE);                  \
                if (condition)                                          \
                        break;                                          \
                if (!signal_pending(current)) {                         \
                        schedule();                                     \
                        continue;                                       \
                }                                                       \
                ret = -ERESTARTSYS;                                     \
                break;                                                  \
        }                                                               \
        current->state = TASK_RUNNING;                                  \
        remove_wait_queue(&wq, &__wait);                                \
} while (0)

#define wait_event_interruptible(wq, condition)                         \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __wait_event_interruptible(wq, condition, __ret);       \
        __ret;                                                          \
})
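
/*
 * Illustrative sketch, not part of the original header: a minimal
 * producer/consumer pairing of wait_event_interruptible() with the
 * wake_up_interruptible() macro above.  The example_queue and
 * example_data_ready identifiers are hypothetical.
 */
#if 0
static DECLARE_WAIT_QUEUE_HEAD(example_queue);
static int example_data_ready;

static int example_consumer(void)
{
        /* Sleep until example_data_ready becomes non-zero. */
        if (wait_event_interruptible(example_queue, example_data_ready))
                return -ERESTARTSYS;    /* woken early by a signal */
        return 0;
}

static void example_producer(void)
{
        example_data_ready = 1;
        wake_up_interruptible(&example_queue);
}
#endif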

#define __wait_event_interruptible_timeout(wq, condition, ret)          \
do {                                                                    \
        wait_queue_t __wait;                                            \
        init_waitqueue_entry(&__wait, current);                         \
                                                                        \
        add_wait_queue(&wq, &__wait);                                   \
        for (;;) {                                                      \
                set_current_state(TASK_INTERRUPTIBLE);                  \
                if (condition)                                          \
                        break;                                          \
                if (!signal_pending(current)) {                         \
                        ret = schedule_timeout(ret);                    \
                        if (!ret)                                       \
                                break;                                  \
                        continue;                                       \
                }                                                       \
                ret = -ERESTARTSYS;                                     \
                break;                                                  \
        }                                                               \
        current->state = TASK_RUNNING;                                  \
        remove_wait_queue(&wq, &__wait);                                \
} while (0)

#define wait_event_interruptible_timeout(wq, condition, timeout)        \
({                                                                      \
        long __ret = timeout;                                           \
        if (!(condition))                                               \
                __wait_event_interruptible_timeout(wq, condition, __ret); \
        __ret;                                                          \
})
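
/*
 * Illustrative sketch, not part of the original header: with the
 * timeout variant the macro result is the number of jiffies left when
 * the condition became true, 0 if the timeout expired first, or
 * -ERESTARTSYS if a signal arrived.  example_queue, example_data_ready
 * and the one-second (HZ jiffies) timeout are hypothetical.
 */
#if 0
static long example_consumer_timeout(void)
{
        long remaining;

        remaining = wait_event_interruptible_timeout(example_queue,
                                                     example_data_ready, HZ);
        if (remaining < 0)
                return remaining;       /* interrupted by a signal */
        if (remaining == 0)
                return -ETIMEDOUT;      /* condition never became true */
        return 0;                       /* condition true, time to spare */
}
#endif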

/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
                                                   wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue_tail(q, wait);
}

/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void remove_wait_queue_locked(wait_queue_head_t *q,
                                            wait_queue_t *wait)
{
        __remove_wait_queue(q, wait);
}

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces during 2.7.
 */
extern void FASTCALL(sleep_on(wait_queue_head_t *q));
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
                                      signed long timeout));
extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
                                                    signed long timeout));

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void FASTCALL(prepare_to_wait(wait_queue_head_t *q,
                              wait_queue_t *wait, int state));
void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q,
                                        wait_queue_t *wait, int state));
void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait));
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync);

#define DEFINE_WAIT(name)                                               \
        wait_queue_t name = {                                           \
                .task           = current,                              \
                .func           = autoremove_wake_function,             \
                .task_list      = { .next = &name.task_list,            \
                                    .prev = &name.task_list,            \
                                  },                                    \
        }
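
/*
 * Illustrative sketch, not part of the original header: the usual
 * open-coded wait loop built on DEFINE_WAIT(), prepare_to_wait() and
 * finish_wait().  Because DEFINE_WAIT() uses autoremove_wake_function,
 * the waker removes the entry from the queue, and prepare_to_wait()
 * re-adds it on every pass.  example_wait_for_flag() is hypothetical.
 */
#if 0
static int example_wait_for_flag(wait_queue_head_t *q, volatile int *flag)
{
        DEFINE_WAIT(wait);
        int ret = 0;

        for (;;) {
                prepare_to_wait(q, &wait, TASK_INTERRUPTIBLE);
                if (*flag)
                        break;
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                schedule();
        }
        finish_wait(q, &wait);
        return ret;
}
#endif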

#define init_wait(wait)                                                 \
        do {                                                            \
                wait->task = current;                                   \
                wait->func = autoremove_wake_function;                  \
                INIT_LIST_HEAD(&wait->task_list);                       \
        } while (0)

#endif /* __KERNEL__ */

#endif