/*
 * include/asm-i386/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_I386_PROCESSOR_H
#define __ASM_I386_PROCESSOR_H

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <linux/cache.h>
#include <linux/config.h>
#include <linux/threads.h>

/* flag for disabling the tsc */
extern int tsc_disable;

struct desc_struct {
        unsigned long a,b;
};

#define desc_empty(desc) \
                (!((desc)->a + (desc)->b))

#define desc_equal(desc1, desc2) \
                (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
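
/*
 * Illustrative note (not part of the original header): the macro plants
 * a local label and loads its address, so the result is the address of
 * the instruction right after the movl, e.g.:
 *
 *      void *pc = current_text_addr();
 *      printk("executing near %p\n", pc);
 */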

/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
        __u8    x86;            /* CPU family */
        __u8    x86_vendor;     /* CPU vendor */
        __u8    x86_model;
        __u8    x86_mask;
        char    wp_works_ok;    /* It doesn't on 386's */
        char    hlt_works_ok;   /* Problems on some 486Dx4's and old 386's */
        char    hard_math;
        char    rfu;
        int     cpuid_level;    /* Maximum supported CPUID level, -1=no CPUID */
        unsigned long   x86_capability[NCAPINTS];
        char    x86_vendor_id[16];
        char    x86_model_id[64];
        int     x86_cache_size;  /* in KB - valid for CPUs which support this
                                    call  */
        int     fdiv_bug;
        int     f00f_bug;
        int     coma_bug;
        unsigned long loops_per_jiffy;
};

#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_RISE 6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_NUM 9
#define X86_VENDOR_UNKNOWN 0xff

/*
 * capabilities of CPUs
 */

extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct doublefault_tss;

#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif

extern char ignore_fpu_irq;

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void dodgy_tsc(void);
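
/*
 * Usage sketch (added for illustration, not in the original header):
 * vendor/family checks typically go through current_cpu_data, e.g.:
 *
 *      if (current_cpu_data.x86_vendor == X86_VENDOR_AMD &&
 *          current_cpu_data.x86 == 6)
 *              ...  K7-family (Athlon) specific setup ...
 */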

/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF   0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF   0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF   0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF   0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF   0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF   0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF   0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF   0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF   0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT   0x00004000 /* Nested Task */
#define X86_EFLAGS_RF   0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM   0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC   0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF  0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP  0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID   0x00200000 /* CPUID detection flag */
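
/*
 * Illustrative sketch, not part of the original header: the classic use
 * of X86_EFLAGS_ID is probing for CPUID support on old CPUs by testing
 * whether the bit can be toggled (the kernel's CPU setup code performs
 * the same check).
 */
static inline int eflags_bit_changeable(unsigned long flag)
{
        unsigned long f1, f2;

        __asm__("pushfl\n\t"
                "pushfl\n\t"
                "popl %0\n\t"
                "movl %0,%1\n\t"
                "xorl %2,%0\n\t"
                "pushl %0\n\t"
                "popfl\n\t"
                "pushfl\n\t"
                "popl %0\n\t"
                "popfl"
                : "=&r" (f1), "=&r" (f2)
                : "ir" (flag));

        /* e.g. eflags_bit_changeable(X86_EFLAGS_ID) != 0 => CPUID exists */
        return ((f1 ^ f2) & flag) != 0;
}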

/*
 * Generic CPUID function
 */
static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
{
        __asm__("cpuid"
                : "=a" (*eax),
                  "=b" (*ebx),
                  "=c" (*ecx),
                  "=d" (*edx)
                : "0" (op));
}
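
/*
 * Usage sketch (added for illustration, not in the original header):
 * leaf 0 returns the maximum supported leaf in EAX and the 12-byte
 * vendor string in EBX, EDX, ECX (in that order).
 *
 *      int eax, ebx, ecx, edx;
 *      char vendor[13];
 *
 *      cpuid(0, &eax, &ebx, &ecx, &edx);
 *      memcpy(vendor + 0, &ebx, 4);
 *      memcpy(vendor + 4, &edx, 4);
 *      memcpy(vendor + 8, &ecx, 4);
 *      vendor[12] = '\0';              e.g. "GenuineIntel"
 */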

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
        unsigned int eax;

        __asm__("cpuid"
                : "=a" (eax)
                : "0" (op)
                : "bx", "cx", "dx");
        return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
        unsigned int eax, ebx;

        __asm__("cpuid"
                : "=a" (eax), "=b" (ebx)
                : "0" (op)
                : "cx", "dx");
        return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
        unsigned int eax, ecx;

        __asm__("cpuid"
                : "=a" (eax), "=c" (ecx)
                : "0" (op)
                : "bx", "dx");
        return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
        unsigned int eax, edx;

        __asm__("cpuid"
                : "=a" (eax), "=d" (edx)
                : "0" (op)
                : "bx", "cx");
        return edx;
}
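
/*
 * Usage sketch (added for illustration, not in the original header):
 * leaf 1 EDX holds the standard feature flags, e.g. bit 25 = SSE
 * (asm/cpufeature.h wraps these bits as cpu_has_* tests).
 *
 *      if (cpuid_edx(1) & (1 << 25))
 *              ...  SSE instructions are available ...
 */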

#define load_cr3(pgdir) \
        asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)))


/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME             0x0001  /* enable vm86 extensions */
#define X86_CR4_PVI             0x0002  /* virtual interrupts flag enable */
#define X86_CR4_TSD             0x0004  /* disable time stamp at ipl 3 */
#define X86_CR4_DE              0x0008  /* enable debugging extensions */
#define X86_CR4_PSE             0x0010  /* enable page size extensions */
#define X86_CR4_PAE             0x0020  /* enable physical address extensions */
#define X86_CR4_MCE             0x0040  /* Machine check enable */
#define X86_CR4_PGE             0x0080  /* enable global pages */
#define X86_CR4_PCE             0x0100  /* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR          0x0200  /* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT      0x0400  /* enable unmasked SSE exceptions */

/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4 (unsigned long mask)
{
        mmu_cr4_features |= mask;
        __asm__("movl %%cr4,%%eax\n\t"
                "orl %0,%%eax\n\t"
                "movl %%eax,%%cr4\n"
                : : "irg" (mask)
                :"ax");
}

static inline void clear_in_cr4 (unsigned long mask)
{
        mmu_cr4_features &= ~mask;
        __asm__("movl %%cr4,%%eax\n\t"
                "andl %0,%%eax\n\t"
                "movl %%eax,%%cr4\n"
                : : "irg" (~mask)
                :"ax");
}
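
/*
 * Usage sketch (added for illustration, not in the original header):
 * enable global pages on CPUs that support them; mmu_cr4_features is
 * updated first so CPUs that boot later get the same flags.
 *
 *      if (cpu_has_pge)
 *              set_in_cr4(X86_CR4_PGE);
 */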

/*
 *      NSC/Cyrix CPU configuration register indexes
 */

#define CX86_PCR0 0x20
#define CX86_GCR  0xb8
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_PCR1 0xf0
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc

/*
 *      NSC/Cyrix CPU indexed register access macros
 */

#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
        outb((reg), 0x22); \
        outb((data), 0x23); \
} while (0)
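
/*
 * Usage sketch (added for illustration, not in the original header):
 * CCR3's MAPEN bits gate access to the other configuration registers,
 * so read-modify-write sequences are typically bracketed like this:
 *
 *      unsigned char ccr3 = getCx86(CX86_CCR3);
 *      setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);       enable MAPEN
 *      setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);  illustrative tweak
 *      setCx86(CX86_CCR3, ccr3);                       hide them again
 */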

/*
 * Bus types (default is ISA, but people can check others with these..)
 * pc98 indicates PC98 systems (CBUS)
 */
extern int MCA_bus;
#ifdef CONFIG_X86_PC9800
#define pc98 1
#else
#define pc98 0
#endif

static inline void __monitor(const void *eax, unsigned long ecx,
                unsigned long edx)
{
        /* "monitor %eax,%ecx,%edx;" */
        asm volatile(
                ".byte 0x0f,0x01,0xc8;"
                : :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
        /* "mwait %eax,%ecx;" */
        asm volatile(
                ".byte 0x0f,0x01,0xc9;"
                : :"a" (eax), "c" (ecx));
}
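
/*
 * Usage sketch (added for illustration, not in the original header):
 * MONITOR arms a write-watch on an address range, MWAIT then idles
 * until that range is written or an interrupt arrives, e.g.:
 *
 *      __monitor(&current_thread_info()->flags, 0, 0);
 *      if (!need_resched())
 *              __mwait(0, 0);
 */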

/* from system description table in BIOS.  Mostly for MCA use, but
   others may find it useful. */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;

/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE       (PAGE_OFFSET)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 3))

/*
 * Size of io_bitmap, covering ports 0 to 0x3ff.
 */
#define IO_BITMAP_BITS  1024
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000

struct i387_fsave_struct {
        long    cwd;
        long    swd;
        long    twd;
        long    fip;
        long    fcs;
        long    foo;
        long    fos;
        long    st_space[20];   /* 8*10 bytes for each FP-reg = 80 bytes */
        long    status;         /* software status information */
};

struct i387_fxsave_struct {
        unsigned short  cwd;
        unsigned short  swd;
        unsigned short  twd;
        unsigned short  fop;
        long    fip;
        long    fcs;
        long    foo;
        long    fos;
        long    mxcsr;
        long    reserved;
        long    st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
        long    xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
        long    padding[56];
} __attribute__ ((aligned (16)));

struct i387_soft_struct {
        long    cwd;
        long    swd;
        long    twd;
        long    fip;
        long    fcs;
        long    foo;
        long    fos;
        long    st_space[20];   /* 8*10 bytes for each FP-reg = 80 bytes */
        unsigned char   ftop, changed, lookahead, no_update, rm, alimit;
        struct info     *info;
        unsigned long   entry_eip;
};

union i387_union {
        struct i387_fsave_struct        fsave;
        struct i387_fxsave_struct       fxsave;
        struct i387_soft_struct soft;
};

typedef struct {
        unsigned long seg;
} mm_segment_t;

struct tss_struct {
        unsigned short  back_link,__blh;
        unsigned long   esp0;
        unsigned short  ss0,__ss0h;
        unsigned long   esp1;
        unsigned short  ss1,__ss1h;     /* ss1 is used to cache MSR_IA32_SYSENTER_CS */
        unsigned long   esp2;
        unsigned short  ss2,__ss2h;
        unsigned long   __cr3;
        unsigned long   eip;
        unsigned long   eflags;
        unsigned long   eax,ecx,edx,ebx;
        unsigned long   esp;
        unsigned long   ebp;
        unsigned long   esi;
        unsigned long   edi;
        unsigned short  es, __esh;
        unsigned short  cs, __csh;
        unsigned short  ss, __ssh;
        unsigned short  ds, __dsh;
        unsigned short  fs, __fsh;
        unsigned short  gs, __gsh;
        unsigned short  ldt, __ldth;
        unsigned short  trace, io_bitmap_base;
        /*
         * The extra 1 is there because the CPU will access an
         * additional byte beyond the end of the IO permission
         * bitmap. The extra byte must be all 1 bits, and must
         * be within the limit.
         */
        unsigned long   io_bitmap[IO_BITMAP_LONGS + 1];
        /*
         * pads the TSS to be cacheline-aligned (size is 0x100)
         */
        unsigned long __cacheline_filler[5];
        /*
         * .. and then another 0x100 bytes for emergency kernel stack
         */
        unsigned long stack[64];
} __attribute__((packed));

// moved here for gcc4 compatibility
extern struct tss_struct init_tss[NR_CPUS];

struct thread_struct {
/* cached TLS descriptors. */
        struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
        unsigned long   esp0;
        unsigned long   eip;
        unsigned long   esp;
        unsigned long   fs;
        unsigned long   gs;
/* Hardware debugging registers */
        unsigned long   debugreg[8];  /* %%db0-7 debug registers */
/* fault info */
        unsigned long   cr2, trap_no, error_code;
/* floating point info */
        union i387_union        i387;
/* virtual 86 mode info */
        struct vm86_struct __user * vm86_info;
        unsigned long           screen_bitmap;
        unsigned long           v86flags, v86mask, saved_esp0;
        unsigned int            saved_fs, saved_gs;
/* IO permissions */
        unsigned long   *io_bitmap_ptr;
};

#define INIT_THREAD  {                                                  \
        .vm86_info = NULL,                                              \
        .io_bitmap_ptr = NULL,                                          \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {                                                     \
        .esp0           = sizeof(init_stack) + (long)&init_stack,       \
        .ss0            = __KERNEL_DS,                                  \
        .esp1           = sizeof(init_tss[0]) + (long)&init_tss[0],     \
        .ss1            = __KERNEL_CS,                                  \
        .ldt            = GDT_ENTRY_LDT,                                \
        .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,                     \
        .io_bitmap      = { [0 ... IO_BITMAP_LONGS] = ~0 },             \
}

static inline void load_esp0(struct tss_struct *tss, unsigned long esp0)
{
        tss->esp0 = esp0;
        /* This can only happen when SEP is enabled, no need to test "SEP"arately */
        if (unlikely(tss->ss1 != __KERNEL_CS)) {
                tss->ss1 = __KERNEL_CS;
                wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
        }
}

static inline void disable_sysenter(struct tss_struct *tss)
{
        if (cpu_has_sep) {
                tss->ss1 = 0;
                wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
        }
}

#define start_thread(regs, new_eip, new_esp) do {               \
        __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));       \
        set_fs(USER_DS);                                        \
        regs->xds = __USER_DS;                                  \
        regs->xes = __USER_DS;                                  \
        regs->xss = __USER_DS;                                  \
        regs->xcs = __USER_CS;                                  \
        regs->eip = new_eip;                                    \
        regs->esp = new_esp;                                    \
} while (0)

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

extern unsigned long thread_saved_pc(struct task_struct *tsk);
void show_trace(struct task_struct *task, unsigned long *stack);

unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk)   (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1019])
#define KSTK_ESP(tsk)   (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1022])
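
/*
 * Note (added for clarity): the task's kernel stack is two pages, with
 * the thread_info at the bottom and the saved user-mode pt_regs frame
 * at the top; indexes 1019 and 1022 into the second page pick out the
 * saved eip and esp slots of that frame.
 */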

struct microcode_header {
        unsigned int hdrver;
        unsigned int rev;
        unsigned int date;
        unsigned int sig;
        unsigned int cksum;
        unsigned int ldrver;
        unsigned int pf;
        unsigned int datasize;
        unsigned int totalsize;
        unsigned int reserved[3];
};

struct microcode {
        struct microcode_header hdr;
        unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from prescott processors */
struct extended_signature {
        unsigned int sig;
        unsigned int pf;
        unsigned int cksum;
};

struct extended_sigtable {
        unsigned int count;
        unsigned int cksum;
        unsigned int reserved[3];
        struct extended_signature sigs[0];
};
/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
#define MICROCODE_IOCFREE       _IO('6',0)

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
        __asm__ __volatile__("rep;nop": : :"memory");
}

#define cpu_relax()     rep_nop()
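
/*
 * Usage sketch (added for illustration, not in the original header):
 * cpu_relax() belongs inside tight polling loops; PAUSE lowers power
 * use and lets the sibling of a hyper-threaded core make progress.
 *
 *      while (!test_bit(MY_READY_BIT, &status))        hypothetical flag
 *              cpu_relax();
 */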

/* generic versions from gas */
#define GENERIC_NOP1    ".byte 0x90\n"
#define GENERIC_NOP2    ".byte 0x89,0xf6\n"
#define GENERIC_NOP3    ".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4    ".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5    GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6    ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7    ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8    GENERIC_NOP1 GENERIC_NOP7

/* Opteron nops */
#define K8_NOP1 GENERIC_NOP1
#define K8_NOP2 ".byte 0x66,0x90\n"
#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5 K8_NOP3 K8_NOP2
#define K8_NOP6 K8_NOP3 K8_NOP3
#define K8_NOP7 K8_NOP4 K8_NOP3
#define K8_NOP8 K8_NOP4 K8_NOP4

/* K7 nops */
/* uses eax dependencies (arbitrary choice) */
#define K7_NOP1 GENERIC_NOP1
#define K7_NOP2 ".byte 0x8b,0xc0\n"
#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5 K7_NOP4 ASM_NOP1
#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
#define K7_NOP8 K7_NOP7 ASM_NOP1

#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

#define ASM_NOP_MAX 8

/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6,
   because they are microcoded there and very slow.
   However, we currently don't do prefetches for pre-XP Athlons;
   that should be fixed. */
#define ARCH_HAS_PREFETCH
extern inline void prefetch(const void *x)
{
        alternative_input(ASM_NOP4,
                          "prefetchnta (%1)",
                          X86_FEATURE_XMM,
                          "r" (x));
}

#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
extern inline void prefetchw(const void *x)
{
        alternative_input(ASM_NOP4,
                          "prefetchw (%1)",
                          X86_FEATURE_3DNOW,
                          "r" (x));
}
#define spin_lock_prefetch(x)   prefetchw(x)
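
/*
 * Usage sketch (added for illustration, not in the original header):
 * prefetch the next node while the current one is being processed.
 *
 *      for (pos = head->next; pos != head; pos = pos->next) {
 *              prefetch(pos->next);
 *              ...  work on the entry containing pos ...
 *      }
 */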

extern void select_idle_routine(const struct cpuinfo_x86 *c);

#endif /* __ASM_I386_PROCESSOR_H */