#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */

#ifdef __KERNEL__

struct task_struct;     /* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

#define switch_to(prev,next,last) do {                                  \
        unsigned long esi,edi;                                          \
        asm volatile("pushfl\n\t"                                       \
                     "pushl %%ebp\n\t"                                  \
                     "movl %%esp,%0\n\t"        /* save ESP */          \
                     "movl %5,%%esp\n\t"        /* restore ESP */       \
                     "movl $1f,%1\n\t"          /* save EIP */          \
                     "pushl %6\n\t"             /* restore EIP */       \
                     "jmp __switch_to\n"                                \
                     "1:\t"                                             \
                     "popl %%ebp\n\t"                                   \
                     "popfl"                                            \
                     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),  \
                      "=a" (last),"=S" (esi),"=D" (edi)                 \
                     :"m" (next->thread.esp),"m" (next->thread.eip),    \
                      "2" (prev), "d" (next));                          \
} while (0)

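/*
 * Usage sketch (hypothetical caller; assumes the full struct task_struct
 * definition from <linux/sched.h> is in scope): the scheduler hands
 * switch_to() the outgoing and incoming tasks, and "last" receives the
 * task that was running immediately before "prev" resumes execution here.
 */
static inline void context_switch_sketch(struct task_struct *prev,
                                         struct task_struct *next)
{
        struct task_struct *last;

        switch_to(prev, next, last);    /* stack and EIP swapped via __switch_to() */
        /* ... "last" can now be used to finish off the previously running task ... */
}
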
#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %%dl,%2\n\t" \
        "movb %%dh,%3" \
        :"=&d" (__pr) \
        :"m" (*((addr)+2)), \
         "m" (*((addr)+4)), \
         "m" (*((addr)+7)), \
         "0" (base) \
        ); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %2,%%dh\n\t" \
        "andb $0xf0,%%dh\n\t" \
        "orb %%dh,%%dl\n\t" \
        "movb %%dl,%2" \
        :"=&d" (__lr) \
        :"m" (*(addr)), \
         "m" (*((addr)+6)), \
         "0" (limit) \
        ); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )

static inline unsigned long _get_base(char * addr)
{
        unsigned long __base;
        __asm__("movb %3,%%dh\n\t"
                "movb %2,%%dl\n\t"
                "shll $16,%%edx\n\t"
                "movw %1,%%dx"
                :"=&d" (__base)
                :"m" (*((addr)+2)),
                 "m" (*((addr)+4)),
                 "m" (*((addr)+7)));
        return __base;
}

#define get_base(ldt) _get_base( ((char *)&(ldt)) )

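/*
 * For reference, a plain-C sketch of what _get_base() assembles from the
 * descriptor bytes: base bits 0-15 live at offsets 2-3, bits 16-23 at
 * offset 4 and bits 24-31 at offset 7 of an i386 segment descriptor.
 * The function name is hypothetical.
 */
static inline unsigned long _get_base_c_sketch(const char *addr)
{
        return  ((unsigned long)(unsigned char)addr[2])       |
                ((unsigned long)(unsigned char)addr[3] << 8)  |
                ((unsigned long)(unsigned char)addr[4] << 16) |
                ((unsigned long)(unsigned char)addr[7] << 24);
}
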
/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)                  \
        asm volatile("\n"                       \
                "1:\t"                          \
                "movl %0,%%" #seg "\n"          \
                "2:\n"                          \
                ".section .fixup,\"ax\"\n"      \
                "3:\t"                          \
                "pushl $0\n\t"                  \
                "popl %%" #seg "\n\t"           \
                "jmp 2b\n"                      \
                ".previous\n"                   \
                ".section __ex_table,\"a\"\n\t" \
                ".align 4\n\t"                  \
                ".long 1b,3b\n"                 \
                ".previous"                     \
                : :"m" (*(unsigned int *)&(value)))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
        asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))

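/*
 * Usage sketch (hypothetical helper): save the current %fs selector and
 * replace it with the null selector.  Note that both macros take an lvalue,
 * since they operate on its address.
 */
static inline unsigned int swap_out_fs_sketch(void)
{
        unsigned int old_fs, null_sel = 0;

        savesegment(fs, old_fs);        /* old_fs = current %fs                   */
        loadsegment(fs, null_sel);      /* %fs = 0; faults fall back to 0 anyway  */
        return old_fs;
}
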
/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({ \
        unsigned int __dummy; \
        __asm__( \
                "movl %%cr0,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})
#define write_cr0(x) \
        __asm__("movl %0,%%cr0": :"r" (x));

#define read_cr4() ({ \
        unsigned int __dummy; \
        __asm__( \
                "movl %%cr4,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})
#define write_cr4(x) \
        __asm__("movl %0,%%cr4": :"r" (x));
#define stts() write_cr0(8 | read_cr0())

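/*
 * Usage sketch: set a feature bit in %cr4 with the accessors above.  The
 * bit value (CR4.OSFXSR, bit 9) and the names are assumptions made for
 * illustration only.
 */
#define CR4_OSFXSR_SKETCH 0x0200
static inline void enable_osfxsr_sketch(void)
{
        write_cr4(read_cr4() | CR4_OSFXSR_SKETCH);
}
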
#endif  /* __KERNEL__ */

#define wbinvd() \
        __asm__ __volatile__ ("wbinvd": : :"memory");

static inline unsigned long get_limit(unsigned long segment)
{
        unsigned long __limit;
        __asm__("lsll %1,%0"
                :"=r" (__limit):"r" (segment));
        return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))


/*
 * The semantics of CMPXCHG8B are a bit strange; this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside it. This inlines well in most cases; the cached
 * cost is around 38 cycles. (In the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU save as a cost, so it's not
 * clear which path to take.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically; see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see a coherent 64-bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
                unsigned int low, unsigned int high)
{
        __asm__ __volatile__ (
                "\n1:\t"
                "movl (%0), %%eax\n\t"
                "movl 4(%0), %%edx\n\t"
                "lock cmpxchg8b (%0)\n\t"
                "jnz 1b"
                : /* no outputs */
                :       "D"(ptr),
                        "b"(low),
                        "c"(high)
                :       "ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
                                                 unsigned long long value)
{
        __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}
#define ll_low(x)       *(((unsigned int*)&(x))+0)
#define ll_high(x)      *(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
                         unsigned long long value)
{
        __set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )

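/*
 * Usage sketch: publish a 64-bit value so that a concurrent reader doing a
 * 64-bit access (e.g. via cmpxchg8b) never observes a torn half-update.
 * The names here are hypothetical.
 */
static inline void publish_u64_sketch(unsigned long long *shared,
                                      unsigned long long value)
{
        set_64bit(shared, value);       /* atomic 64-bit store via lock cmpxchg8b */
}
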
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
 *        strictly speaking the primitive is invalid as written, since *ptr
 *        is really an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
                case 1:
                        __asm__ __volatile__("xchgb %b0,%1"
                                :"=q" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
                case 2:
                        __asm__ __volatile__("xchgw %w0,%1"
                                :"=r" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
                case 4:
                        __asm__ __volatile__("xchgl %0,%1"
                                :"=r" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
        }
        return x;
}

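/*
 * Usage sketch: a minimal test-and-set spin loop built on tas()/xchg().
 * The lock variable and helper names are hypothetical; real code would use
 * the spinlock API and relax the CPU inside the spin loop.
 */
static inline void tas_lock_sketch(volatile int *lock)
{
        while (tas(lock))               /* atomically set to 1; spin if it was already 1 */
                while (*lock)           /* read-only spin to keep the bus quieter        */
                        ;
}

static inline void tas_unlock_sketch(volatile int *lock)
{
        *lock = 0;                      /* plain store; a real unlock would add barrier() */
}
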
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 2:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 4:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        }
        return old;
}

#define cmpxchg(ptr,o,n)\
        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
                                        (unsigned long)(n),sizeof(*(ptr))))

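/*
 * Usage sketch: a lock-free increment built on cmpxchg().  Retry until no
 * other CPU modified the counter between the read and the compare-exchange.
 * The function name is hypothetical.
 */
static inline unsigned long lockfree_inc_sketch(volatile unsigned long *counter)
{
        unsigned long old, new;

        do {
                old = *counter;
                new = old + 1;
        } while (cmpxchg(counter, old, new) != old);

        return new;
}
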
#else
/* Compiling for a 386 proper.  Is it worth implementing via cli/sti?  */
#endif

#ifdef __KERNEL__
struct alt_instr {
        __u8 *instr;            /* original instruction */
        __u8 *replacement;
        __u8  cpuid;            /* cpuid bit set for replacement */
        __u8  instrlen;         /* length of original instruction */
        __u8  replacementlen;   /* length of new instruction, <= instrlen */
        __u8  pad;
};
#endif

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows optimized instructions to be used even on generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length of
 * newinstr; the remainder is padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and the memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)        \
        asm volatile ("661:\n\t" oldinstr "\n662:\n"                            \
                      ".section .altinstructions,\"a\"\n"                       \
                      "  .align 4\n"                                            \
                      "  .long 661b\n"            /* label */                   \
                      "  .long 663f\n"            /* new instruction */         \
                      "  .byte %c0\n"             /* feature bit */             \
                      "  .byte 662b-661b\n"       /* sourcelen */               \
                      "  .byte 664f-663f\n"       /* replacementlen */          \
                      ".previous\n"                                             \
                      ".section .altinstr_replacement,\"ax\"\n"                 \
                      "663:\n\t" newinstr "\n664:\n"   /* replacement */        \
                      ".previous" :: "i" (feature) : "memory")

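/*
 * Usage sketch: the barrier macros later in this header are the canonical
 * users.  For instance, a write barrier can be expressed as a locked no-op
 * that gets patched to SFENCE at boot on CPUs advertising X86_FEATURE_XMM
 * (this mirrors the wmb() definition below; the macro name is hypothetical):
 */
#define wmb_sketch() \
        alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
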
/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * It is best to use constraints that are fixed size (like "r" for (%1)).
 * If you use variable-sized constraints like "m" or "g" in the
 * replacement, make sure to pad to the worst-case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input)                   \
        asm volatile ("661:\n\t" oldinstr "\n662:\n"                            \
                      ".section .altinstructions,\"a\"\n"                       \
                      "  .align 4\n"                                            \
                      "  .long 661b\n"            /* label */                   \
                      "  .long 663f\n"            /* new instruction */         \
                      "  .byte %c0\n"             /* feature bit */             \
                      "  .byte 662b-661b\n"       /* sourcelen */               \
                      "  .byte 664f-663f\n"       /* replacementlen */          \
                      ".previous\n"                                             \
                      ".section .altinstr_replacement,\"ax\"\n"                 \
                      "663:\n\t" newinstr "\n664:\n"   /* replacement */        \
                      ".previous" :: "i" (feature), input)

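/*
 * Usage sketch (hypothetical helper, loosely modeled on the kernel's
 * prefetch() wrappers): execute PREFETCHNTA only on CPUs with SSE, and a
 * same-length NOP padding otherwise.  The 4-byte padding is an assumption;
 * note the input is argument %1, per the comment above.
 */
static inline void prefetch_sketch(const void *x)
{
        alternative_input("nop; nop; nop; nop",         /* 4 bytes of padding */
                          "prefetchnta (%1)",           /* at most 4 bytes    */
                          X86_FEATURE_XMM,
                          "r" (x));
}
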
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls *Processor Order*,
 * in which all writes are seen in program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores; wmb() ceases to be a
 * nop for these.
 */


/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should already be ordered. But keep a full barrier for now.
 */

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      b = 2;
 *      memory_barrier();
 *      p = &b;                         q = p;
 *                                      read_barrier_depends();
 *                                      d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      a = 2;
 *      memory_barrier();
 *      b = 3;                          y = b;
 *                                      read_barrier_depends();
 *                                      x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()  do { } while(0)

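/*
 * A C sketch of the first <programlisting> above, using the macros from
 * this header (mb() stands in for the example's memory_barrier(); the
 * wrapper functions and variable names are hypothetical):
 */
static inline void rbd_writer_sketch(int **p, int *b)
{
        *b = 2;
        mb();                           /* order the store to *b before the store to p */
        *p = b;
}

static inline int rbd_reader_sketch(int **p)
{
        int *q = *p;
        read_barrier_depends();         /* order the dependent read below */
        return *q;                      /* sees 2 whenever q observed the new p */
}
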
#ifdef CONFIG_X86_OOSTORE
/* Actually there are no OOO-store-capable CPUs for now that do SSE,
   but allow for the possibility already. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()   __asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#define set_wmb(var, value) do { var = value; wmb(); } while (0)

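/*
 * Usage sketch: a classic message-then-flag handoff with the SMP barrier
 * macros (names hypothetical).  The producer publishes the payload before
 * setting the flag; the consumer checks the flag before reading the payload.
 */
static inline void producer_sketch(int *data, volatile int *ready)
{
        *data = 42;             /* fill in the payload               */
        smp_wmb();              /* order payload store before flag   */
        *ready = 1;
}

static inline int consumer_sketch(const int *data, const volatile int *ready)
{
        if (!*ready)
                return -1;      /* nothing published yet             */
        smp_rmb();              /* order flag read before data read  */
        return *data;
}
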
/* interrupt control.. */
#define local_save_flags(x)     do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
#define local_irq_restore(x)    do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
#define local_irq_disable()     __asm__ __volatile__("cli": : :"memory")
#define local_irq_enable()      __asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt()             __asm__ __volatile__("sti; hlt": : :"memory")

#define irqs_disabled()                 \
({                                      \
        unsigned long flags;            \
        local_save_flags(flags);        \
        !(flags & (1<<9));              \
})

/* For spinlocks etc */
#define local_irq_save(x)       __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")

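/*
 * Usage sketch (hypothetical helper): a short critical section against
 * local interrupts.  local_irq_save() stores EFLAGS and executes CLI;
 * local_irq_restore() pops the saved flags, so interrupts are re-enabled
 * only if they were enabled on entry.
 */
static inline void protected_update_sketch(unsigned long *counter)
{
        unsigned long flags;

        local_irq_save(flags);          /* pushfl ; popl flags ; cli      */
        (*counter)++;                   /* cannot be interrupted locally  */
        local_irq_restore(flags);       /* pushl flags ; popfl            */
}
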
/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

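/*
 * Usage sketch: bracket an hlt-sensitive I/O operation so the idle loop
 * does not execute HLT while it is in progress (do_sensitive_io_sketch is
 * a hypothetical placeholder for the actual operation).
 */
static inline void hlt_sensitive_io_sketch(void (*do_sensitive_io_sketch)(void))
{
        disable_hlt();                  /* keep the idle loop from halting */
        do_sensitive_io_sketch();
        enable_hlt();
}
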
extern unsigned long dmi_broken;
extern int is_sony_vaio_laptop;

#define BROKEN_ACPI_Sx          0x0001
#define BROKEN_INIT_AFTER_S1    0x0002
#define BROKEN_PNP_BIOS         0x0004
#define BROKEN_CPUFREQ          0x0008

#endif