#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define ADDR (*(volatile long *) addr)

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

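/*
 * Usage sketch for set_bit(): a minimal, illustrative example; the
 * bitmap below is hypothetical and not part of this header.
 */
#if 0
static unsigned long pending_map[2];            /* a 64-bit example bitmap */

static void mark_pending(int nr)
{
        /* Atomically sets bit (nr & 31) of pending_map[nr >> 5]. */
        set_bit(nr, pending_map);
}
#endif
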
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
        __asm__(
                "btsl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()

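/*
 * Barrier-pairing sketch (illustrative; lock_word is a hypothetical
 * example variable): clear_bit() itself is not a memory barrier, so an
 * unlock-style use should order prior stores explicitly, as the comment
 * above requires.
 */
#if 0
static unsigned long lock_word;

static void release_resource(void)
{
        smp_mb__before_clear_bit();     /* make prior stores visible first */
        clear_bit(0, &lock_word);       /* then drop the busy flag */
}
#endif
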
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__(
                "btcl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btcl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

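/*
 * Toggle sketch (illustrative; led_state is a hypothetical example
 * variable): on SMP the macro above makes this emit "lock ; btcl", so
 * concurrent toggles of different bits in the same word stay consistent.
 */
#if 0
static unsigned long led_state;

static void toggle_led(int nr)
{
        change_bit(nr, &led_state);
}
#endif
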
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btsl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

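/*
 * Trylock sketch (illustrative; busy_map is a hypothetical example
 * variable): the returned old value tells the caller whether it won
 * the race for the bit.
 */
#if 0
static unsigned long busy_map;

static int try_claim(void)
{
        /* 1 if we set the bit (it was clear), 0 if someone else holds it. */
        return !test_and_set_bit(0, &busy_map);
}
#endif
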
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__(
                "btsl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr));
        return oldbit;
}

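/*
 * Locked-use sketch (illustrative; map_lock and obj_map are hypothetical,
 * and <linux/spinlock.h> is assumed): the non-atomic form is safe only
 * when every access to the word is serialized by the same lock.
 */
#if 0
static spinlock_t map_lock = SPIN_LOCK_UNLOCKED;
static unsigned long obj_map;

static int claim_slot_locked(int nr)
{
        int was_set;

        spin_lock(&map_lock);
        was_set = __test_and_set_bit(nr, &obj_map);
        spin_unlock(&map_lock);
        return !was_set;        /* 1 if this caller claimed the slot */
}
#endif
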
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__(
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr));
        return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__(
                "btcl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btcl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
        return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__(
                "btl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit)
                :"m" (ADDR),"Ir" (nr));
        return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))

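/*
 * Dispatch sketch (illustrative; feature_map is a hypothetical example
 * variable): with a literal bit number the macro resolves to
 * constant_test_bit(), with a runtime value it resolves to
 * variable_test_bit().
 */
#if 0
static unsigned long feature_map;

static int probe(int nr)
{
        int fixed = test_bit(3, &feature_map);  /* constant_test_bit() */
        int var = test_bit(nr, &feature_map);   /* variable_test_bit() */
        return fixed && var;
}
#endif
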
/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search, in bits
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit(void * addr, unsigned size)
{
        int d0, d1, d2;
        int res;

        if (!size)
                return 0;
        /* This looks at memory. Mark it volatile to tell gcc not to move it around */
        __asm__ __volatile__(
                "movl $-1,%%eax\n\t"
                "xorl %%edx,%%edx\n\t"
                "repe; scasl\n\t"
                "je 1f\n\t"
                "xorl -4(%%edi),%%eax\n\t"
                "subl $4,%%edi\n\t"
                "bsfl %%eax,%%edx\n"
                "1:\tsubl %%ebx,%%edi\n\t"
                "shll $3,%%edi\n\t"
                "addl %%edi,%%edx"
                :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
                :"1" ((size + 31) >> 5), "2" (addr), "b" (addr));
        return res;
}

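/*
 * Allocation sketch (illustrative; NR_SLOTS and slot_map are
 * hypothetical): find a free slot, then claim it with the non-atomic
 * set while the caller serializes access to the bitmap.
 */
#if 0
#define NR_SLOTS 128
static unsigned long slot_map[NR_SLOTS / 32];

static int alloc_slot(void)
{
        int slot = find_first_zero_bit(slot_map, NR_SLOTS);
        if (slot >= NR_SLOTS)
                return -1;              /* bitmap is full */
        __set_bit(slot, slot_map);      /* caller holds the bitmap's lock */
        return slot;
}
#endif
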
/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search, in bits
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
        unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
        int set = 0, bit = offset & 31, res;

        if (bit) {
                /*
                 * Look for zero in the first word, starting at the offset bit
                 */
                __asm__("bsfl %1,%0\n\t"
                        "jne 1f\n\t"
                        "movl $32, %0\n"
                        "1:"
                        : "=r" (set)
                        : "r" (~(*p >> bit)));
                if (set < (32 - bit))
                        return set + offset;
                set = 32 - bit;
                p++;
        }
        /*
         * No zero yet, search remaining full words for a zero
         */
        res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
        return (offset + set + res);
}

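/*
 * Resumed-search sketch (illustrative; slot_map and NR_SLOTS continue
 * the previous hypothetical sketch): skip a slot that turned out to be
 * unusable and look for the next free one. Note the argument order of
 * the function above: (addr, size, offset).
 */
#if 0
static int next_free_after(int slot)
{
        return find_next_zero_bit(slot_map, NR_SLOTS, slot + 1);
}
#endif
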
/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
        __asm__("bsfl %1,%0"
                :"=r" (word)
                :"r" (~word));
        return word;
}

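/*
 * Worked example for ffz() (illustrative): 0x0000ffff has bits 0..15
 * set, so its first zero is bit 16. Callers must reject ~0UL first,
 * since bsfl on a zero source leaves the result undefined.
 */
#if 0
static unsigned long ffz_demo(void)
{
        return ffz(0x0000ffffUL);       /* returns 16 */
}
#endif
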
#ifdef __KERNEL__

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
        int r;

        __asm__("bsfl %1,%0\n\t"
                "jnz 1f\n\t"
                "movl $-1,%0\n"
                "1:" : "=r" (r) : "g" (x));
        return r+1;
}

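/*
 * Worked example for ffs() (illustrative): the result is 1-based like
 * the libc routine, and ffs(0) is 0 because the "movl $-1" path makes
 * r+1 come out as zero.
 */
#if 0
static void ffs_demo(void)
{
        int a = ffs(0x8);       /* 4: bit 3 is the lowest set bit */
        int b = ffs(0);         /* 0: no bits set */
}
#endif
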
/**
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

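/*
 * Worked example for the hweight macros (illustrative): each counts the
 * set bits in its operand via the generic helpers from <linux/bitops.h>.
 */
#if 0
static void hweight_demo(void)
{
        unsigned int n32 = hweight32(0xf0f0f0f0);       /* 16 */
        unsigned int n8 = hweight8(0x0f);               /* 4 */
}
#endif
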
#endif /* __KERNEL__ */

#ifdef __KERNEL__

#define ext2_set_bit __test_and_set_bit
#define ext2_clear_bit __test_and_clear_bit
#define ext2_test_bit test_bit
#define ext2_find_first_zero_bit find_first_zero_bit
#define ext2_find_next_zero_bit find_next_zero_bit

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */