#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>
#include <linux/compiler.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define ADDR (*(volatile long *) addr)
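/*
 * Editorial note: LOCK_PREFIX works by C string-literal concatenation, so
 * on SMP the asm template in set_bit() below becomes "lock ; btsl %1,%0",
 * while on UP the prefix simply vanishes.  ADDR dereferences the caller's
 * pointer as a volatile long so the asm constraints see a memory operand.
 */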
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
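/*
 * Hypothetical usage sketch ('flags' is an illustrative name, not part of
 * this header):
 *
 *	static unsigned long flags[2];
 *
 *	set_bit(0, flags);	// atomically: flags[0] |= 1UL << 0
 *	set_bit(35, flags);	// lands in flags[1] (bit 3): @nr may index
 *				// past the first word, per the note above
 */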

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile unsigned long * addr)
{
	__asm__(
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
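/*
 * Hypothetical release-side sketch ('shared_data', 'LOCK_BIT' and
 * 'lock_word' are illustrative names): clear_bit() alone does not order
 * prior stores, so publish data before dropping a lock-like bit:
 *
 *	shared_data = value;
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &lock_word);
 */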

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
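/*
 * The "sbbl %0,%0" idiom subtracts a register from itself with borrow,
 * yielding 0 if the carry flag was clear and -1 if it was set; btsl has
 * just loaded the bit's old value into CF, so oldbit becomes a branch-free
 * copy of it.  A hypothetical trylock sketch on top of this ('LOCK_BIT'
 * and 'lock_word' are illustrative names):
 *
 *	if (!test_and_set_bit(LOCK_BIT, &lock_word)) {
 *		// old bit was 0: we set it, so we "own" the resource
 *	}
 */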

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile unsigned long* addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, const volatile unsigned long * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 constant_test_bit((nr),(addr)) : \
	 variable_test_bit((nr),(addr)))

#undef ADDR
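/*
 * test_bit() dispatches at compile time: __builtin_constant_p(nr) is true
 * when gcc can prove @nr constant, so e.g. (with illustrative names):
 *
 *	test_bit(5, map)	// folds to ((1UL << 5) & map[0]) != 0
 *	test_bit(i, map)	// emits the btl-based variable_test_bit()
 */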

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit(const unsigned long *addr, unsigned size)
{
	int d0, d1, d2;
	int res;

	if (!size)
		return 0;
	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr));
	return res;
}
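/*
 * A plain-C sketch of what the asm computes (an editorial illustration,
 * not an alternative provided by this header):
 *
 *	// repe;scasl skips leading all-ones words; for the first word that
 *	// is not ~0UL, xorl with %eax (-1) complements it and bsfl finds
 *	// the lowest set bit, i.e. the first zero bit of the original.
 *	for (i = 0; i < (size + 31) >> 5; i++)
 *		if (addr[i] != ~0UL)
 *			return i * 32 + ffz(addr[i]);
 *	return ((size + 31) >> 5) * 32;
 */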

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_bit(const unsigned long *addr, unsigned size)
{
	int d0, d1;
	int res;

	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"xorl %%eax,%%eax\n\t"
		"repe; scasl\n\t"
		"jz 1f\n\t"
		"leal -4(%%edi),%%edi\n\t"
		"bsfl (%%edi),%%eax\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%eax"
		:"=a" (res), "=&c" (d0), "=&D" (d1)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr));
	return res;
}

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bitnumber to start searching at
 */
static __inline__ int find_next_zero_bit(const unsigned long *addr, int size, int offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;

	if (bit) {
		/*
		 * Look for zero in the first 32 bits.
		 */
		__asm__("bsfl %1,%0\n\t"
			"jne 1f\n\t"
			"movl $32, %0\n"
			"1:"
			: "=r" (set)
			: "r" (~(*p >> bit)));
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full words for a zero
	 */
	res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
	return (offset + set + res);
}
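/*
 * Hypothetical iteration sketch, visiting every zero bit ('map', 'NBITS'
 * and 'use_free_slot' are illustrative names):
 *
 *	for (i = find_first_zero_bit(map, NBITS);
 *	     i < NBITS;
 *	     i = find_next_zero_bit(map, NBITS, i + 1))
 *		use_free_slot(i);
 */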

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bitnumber to start searching at
 */
static __inline__ int find_next_bit(const unsigned long *addr, int size, int offset)
{
	const unsigned long *p = addr + (offset >> 5);
	int set = 0, bit = offset & 31, res;

	if (bit) {
		/*
		 * Look for nonzero in the first 32 bits:
		 */
		__asm__("bsfl %1,%0\n\t"
			"jne 1f\n\t"
			"movl $32, %0\n"
			"1:"
			: "=r" (set)
			: "r" (*p >> bit));
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No set bit yet, search remaining full words for a bit
	 */
	res = find_first_bit (p, size - 32 * (p - addr));
	return (offset + set + res);
}

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
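/*
 * Worked examples:
 *
 *	ffz(0x00000000) == 0	// bit 0 is already zero
 *	ffz(0x0000ffff) == 16	// first zero above the low 16 set bits
 *	ffz(~0UL)		// undefined: check against ~0UL first
 */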

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
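/*
 * Worked examples:
 *
 *	__ffs(0x00000008) == 3	// lowest set bit is bit 3
 *	__ffs(0xff000000) == 24
 *	__ffs(0)		// undefined: check against 0 first
 */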

/*
 * fls: find last bit set.
 */

#define fls(x) generic_fls(x)

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}
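/*
 * Hypothetical caller-side sketch: the five 32-bit words give a 160-bit
 * map of which 140 bits are meaningful ('prio_bitmap' and 'task_prio' are
 * illustrative names):
 *
 *	unsigned long prio_bitmap[5] = { 0 };
 *
 *	set_bit(task_prio, prio_bitmap);
 *	next = sched_find_first_bit(prio_bitmap); // lowest set bit index
 */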

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}
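/*
 * Worked examples, contrasting with __ffs() above:
 *
 *	ffs(0x00000008) == 4	// 1-based, vs. __ffs(0x00000008) == 3
 *	ffs(0) == 0		// bsfl leaves ZF set, the movl $-1 path
 *				// runs, and r+1 yields 0
 */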

/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
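/*
 * Worked examples:
 *
 *	hweight8(0xf0)        == 4
 *	hweight16(0x0101)     == 2
 *	hweight32(0xffffffff) == 32
 */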

#endif /* __KERNEL__ */

#ifdef __KERNEL__

#define ext2_set_bit(nr,addr) \
	__test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_set_bit_atomic(lock,nr,addr) \
	test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
	__test_and_clear_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr, addr) \
	test_and_clear_bit((nr),(unsigned long*)addr)
#define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr)
#define ext2_find_first_zero_bit(addr, size) \
	find_first_zero_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
	find_next_zero_bit((unsigned long*)addr, size, off)
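/*
 * Note: on i386 the 'lock' argument of ext2_set_bit_atomic() and
 * ext2_clear_bit_atomic() is simply ignored -- the expansions above never
 * reference it, since test_and_set_bit()/test_and_clear_bit() are already
 * atomic on their own.
 */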

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr)
#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr)
#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr)
#define minix_find_first_zero_bit(addr,size) \
	find_first_zero_bit((void*)addr,size)

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */