#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__

#include <linux/config.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif
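
/*
 * How LOCK is used: the macro is glued onto the mnemonic by C string
 * concatenation, so under CONFIG_SMP
 *
 *     LOCK "addl %1,%0"
 *
 * expands to "lock ; addl %1,%0", a bus-locked read-modify-write.
 * On uniprocessor builds the prefix is empty: a single CPU cannot
 * race with itself across one instruction.
 */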

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  { (i) }
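
/*
 * Usage sketch (hypothetical code, for illustration): static
 * definition with a compile-time initial value.
 *
 *     static atomic_t nr_users = ATOMIC_INIT(0);
 */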

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)          ((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v,i)         (((v)->counter) = (i))
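
/*
 * Usage sketch (hypothetical code, for illustration):
 *
 *     atomic_t hits = ATOMIC_INIT(0);
 *     int seen;
 *
 *     seen = atomic_read(&hits);  // plain volatile load
 *     atomic_set(&hits, 0);       // plain volatile store
 *
 * Neither needs a lock prefix: an aligned 32-bit load or store is
 * already atomic on i386.
 */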

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  Note that the guaranteed useful range
 * of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "addl %1,%0"
                :"=m" (v->counter)
                :"ir" (i), "m" (v->counter));
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "subl %1,%0"
                :"=m" (v->counter)
                :"ir" (i), "m" (v->counter));
}
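
/*
 * Usage sketch (hypothetical code, for illustration): counting a
 * resource in units larger than one.
 *
 *     static atomic_t pages_in_flight = ATOMIC_INIT(0);
 *
 *     atomic_add(nr_pages, &pages_in_flight);   // on submission
 *     atomic_sub(nr_pages, &pages_in_flight);   // on completion
 */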

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK "subl %2,%0; sete %1"
                :"=m" (v->counter), "=qm" (c)
                :"ir" (i), "m" (v->counter) : "memory");
        return c;
}
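
/*
 * Usage sketch (hypothetical code, for illustration): dropping
 * several references at once and catching the final one.
 *
 *     if (atomic_sub_and_test(nr_refs, &obj->count))
 *             release_obj(obj);   // count reached zero here
 */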

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "decl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}
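
/*
 * Usage sketch (hypothetical code, for illustration): a counter that
 * several CPUs bump without any lock.
 *
 *     static atomic_t pending_events = ATOMIC_INIT(0);
 *
 *     atomic_inc(&pending_events);   // producer
 *     atomic_dec(&pending_events);   // consumer
 */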

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK "decl %0; sete %1"
                :"=m" (v->counter), "=qm" (c)
                :"m" (v->counter) : "memory");
        return c != 0;
}
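
/*
 * Usage sketch (hypothetical code, for illustration): the classic
 * reference-count "put".  Exactly one caller sees the count reach
 * zero, so exactly one caller runs the destructor.
 *
 *     void obj_put(struct obj *o)
 *     {
 *             if (atomic_dec_and_test(&o->refcnt))
 *                     obj_free(o);   // hypothetical destructor
 *     }
 */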

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK "incl %0; sete %1"
                :"=m" (v->counter), "=qm" (c)
                :"m" (v->counter) : "memory");
        return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when the
 * result is greater than or equal to zero.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK "addl %2,%0; sets %1"
                :"=m" (v->counter), "=qm" (c)
                :"ir" (i), "m" (v->counter) : "memory");
        return c;
}
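
/*
 * Usage sketch (hypothetical code, for illustration): a budget that
 * may legitimately go negative.
 *
 *     if (atomic_add_negative(-cost, &budget))
 *             throttle();   // we just overdrew the budget
 */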

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK "andl %0,%1" \
: : "r" (~(mask)),"m" (*(addr)) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK "orl %0,%1" \
: : "r" (mask),"m" (*(addr)) : "memory")

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()
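
/*
 * Usage sketch (hypothetical code, for illustration): portable callers
 * still write the barriers; on x86 they cost only a compiler barrier
 * because the locked instructions already serialize.
 *
 *     smp_mb__before_atomic_dec();
 *     atomic_dec(&work->pending);
 */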

#endif /* __ARCH_I386_ATOMIC__ */