/*
 * include/asm-i386/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */
 
#ifndef __ASM_I386_PROCESSOR_H
#define __ASM_I386_PROCESSOR_H

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <linux/cache.h>
#include <linux/config.h>
#include <linux/threads.h>

/* flag for disabling the tsc */
extern int tsc_disable;

struct desc_struct {
        unsigned long a,b;
};

#define desc_empty(desc) \
                (!((desc)->a + (desc)->b))

#define desc_equal(desc1, desc2) \
                (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
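
/*
 * Explanatory note: "movl $1f,%0" loads the address of the local label
 * "1:" that immediately follows the instruction, so pc ends up holding
 * the address of the very next instruction at the point of expansion.
 */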

/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
        __u8    x86;            /* CPU family */
        __u8    x86_vendor;     /* CPU vendor */
        __u8    x86_model;
        __u8    x86_mask;
        char    wp_works_ok;    /* It doesn't on 386's */
        char    hlt_works_ok;   /* Problems on some 486Dx4's and old 386's */
        char    hard_math;
        char    rfu;
        int     cpuid_level;    /* Maximum supported CPUID level, -1=no CPUID */
        unsigned long   x86_capability[NCAPINTS];
        char    x86_vendor_id[16];
        char    x86_model_id[64];
        int     x86_cache_size; /* in KB - valid for CPUs which support this call */
        int     fdiv_bug;
        int     f00f_bug;
        int     coma_bug;
        unsigned long loops_per_jiffy;
};

#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_RISE 6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_NUM 9
#define X86_VENDOR_UNKNOWN 0xff

/*
 * capabilities of CPUs
 */

extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct init_tss[NR_CPUS];
extern struct tss_struct doublefault_tss;

#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif

extern char ignore_fpu_irq;

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void dodgy_tsc(void);

/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF   0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF   0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF   0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF   0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF   0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF   0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF   0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF   0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF   0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT   0x00004000 /* Nested Task */
#define X86_EFLAGS_RF   0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM   0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC   0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF  0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP  0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID   0x00200000 /* CPUID detection flag */

/*
 * Generic CPUID function
 */
static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
{
        __asm__("cpuid"
                : "=a" (*eax),
                  "=b" (*ebx),
                  "=c" (*ecx),
                  "=d" (*edx)
                : "0" (op));
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
        unsigned int eax;

        __asm__("cpuid"
                : "=a" (eax)
                : "0" (op)
                : "bx", "cx", "dx");
        return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
        unsigned int eax, ebx;

        __asm__("cpuid"
                : "=a" (eax), "=b" (ebx)
                : "0" (op)
                : "cx", "dx");
        return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
        unsigned int eax, ecx;

        __asm__("cpuid"
                : "=a" (eax), "=c" (ecx)
                : "0" (op)
                : "bx", "dx");
        return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
        unsigned int eax, edx;

        __asm__("cpuid"
                : "=a" (eax), "=d" (edx)
                : "0" (op)
                : "bx", "cx");
        return edx;
}
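
/*
 * Usage sketch (illustrative, not part of the original header): reading
 * the 12-byte vendor string with CPUID leaf 0. The bytes are laid out
 * architecturally in the order ebx, edx, ecx, hence the argument order.
 *
 *      int eax, vendor[3];
 *      cpuid(0, &eax, &vendor[0], &vendor[2], &vendor[1]);
 *      // vendor[] now spells e.g. "GenuineIntel"; eax is the max leaf.
 */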

#define load_cr3(pgdir) \
        asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)))

/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME             0x0001  /* enable vm86 extensions */
#define X86_CR4_PVI             0x0002  /* virtual interrupts flag enable */
#define X86_CR4_TSD             0x0004  /* disable time stamp at ipl 3 */
#define X86_CR4_DE              0x0008  /* enable debugging extensions */
#define X86_CR4_PSE             0x0010  /* enable page size extensions */
#define X86_CR4_PAE             0x0020  /* enable physical address extensions */
#define X86_CR4_MCE             0x0040  /* Machine check enable */
#define X86_CR4_PGE             0x0080  /* enable global pages */
#define X86_CR4_PCE             0x0100  /* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR          0x0200  /* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT      0x0400  /* enable unmasked SSE exceptions */

/*
 * Save the cr4 feature set we're using (i.e.
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4 (unsigned long mask)
{
        mmu_cr4_features |= mask;
        __asm__("movl %%cr4,%%eax\n\t"
                "orl %0,%%eax\n\t"
                "movl %%eax,%%cr4\n"
                : : "irg" (mask)
                :"ax");
}

static inline void clear_in_cr4 (unsigned long mask)
{
        mmu_cr4_features &= ~mask;
        __asm__("movl %%cr4,%%eax\n\t"
                "andl %0,%%eax\n\t"
                "movl %%eax,%%cr4\n"
                : : "irg" (~mask)
                :"ax");
}
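
/*
 * Usage sketch (illustrative, assuming the cpu_has_pge test from
 * <asm/cpufeature.h>): enabling global pages only when the CPU
 * advertises support, the way boot-time setup code drives these helpers.
 *
 *      if (cpu_has_pge)
 *              set_in_cr4(X86_CR4_PGE);
 */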

/*
 *      NSC/Cyrix CPU configuration register indexes
 */

#define CX86_PCR0 0x20
#define CX86_GCR  0xb8
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_PCR1 0xf0
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc

/*
 *      NSC/Cyrix CPU indexed register access macros
 */

#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
        outb((reg), 0x22); \
        outb((data), 0x23); \
} while (0)
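
/*
 * These work like CMOS access: write the register index to port 0x22,
 * then read or write the data through port 0x23. Usage sketch
 * (illustrative; the bit value is chosen purely for illustration):
 * a read-modify-write of configuration control register 2.
 *
 *      unsigned char ccr2 = getCx86(CX86_CCR2);
 *      setCx86(CX86_CCR2, ccr2 | 0x08);
 */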

/*
 * Bus types (default is ISA, but people can check others with these..)
 * pc98 indicates PC98 systems (CBUS)
 */
extern int MCA_bus;
#ifdef CONFIG_X86_PC9800
#define pc98 1
#else
#define pc98 0
#endif

static inline void __monitor(const void *eax, unsigned long ecx,
                unsigned long edx)
{
        /* "monitor %eax,%ecx,%edx;" */
        asm volatile(
                ".byte 0x0f,0x01,0xc8;"
                : :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
        /* "mwait %eax,%ecx;" */
        asm volatile(
                ".byte 0x0f,0x01,0xc9;"
                : :"a" (eax), "c" (ecx));
}
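
/*
 * The instructions are emitted as raw bytes because older assemblers do
 * not know the monitor/mwait mnemonics. Usage sketch of the usual idle
 * pairing (illustrative; "wakeup_flag" is a hypothetical variable): arm
 * a monitor on the flag's cache line, re-check it, then mwait until the
 * line is written or an interrupt arrives.
 *
 *      __monitor(&wakeup_flag, 0, 0);
 *      if (!wakeup_flag)
 *              __mwait(0, 0);
 */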

/* from system description table in BIOS.  Mostly for MCA use, but
   others may find it useful. */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;

/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE       (PAGE_OFFSET)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 3))

/*
 * Size of io_bitmap, covering ports 0 to 0x3ff.
 */
#define IO_BITMAP_BITS  1024
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
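
/*
 * One bit per I/O port: 1024 bits = 128 bytes = 32 longs on i386 (plus
 * the extra all-ones byte the CPU reads past the end; see tss_struct
 * below). INVALID_IO_BITMAP_OFFSET points beyond the TSS limit, so with
 * no bitmap installed any I/O port access at CPL 3 faults.
 */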

struct i387_fsave_struct {
        long    cwd;
        long    swd;
        long    twd;
        long    fip;
        long    fcs;
        long    foo;
        long    fos;
        long    st_space[20];   /* 8*10 bytes for each FP-reg = 80 bytes */
        long    status;         /* software status information */
};

struct i387_fxsave_struct {
        unsigned short  cwd;
        unsigned short  swd;
        unsigned short  twd;
        unsigned short  fop;
        long    fip;
        long    fcs;
        long    foo;
        long    fos;
        long    mxcsr;
        long    reserved;
        long    st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
        long    xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
        long    padding[56];
} __attribute__ ((aligned (16)));

struct i387_soft_struct {
        long    cwd;
        long    swd;
        long    twd;
        long    fip;
        long    fcs;
        long    foo;
        long    fos;
        long    st_space[20];   /* 8*10 bytes for each FP-reg = 80 bytes */
        unsigned char   ftop, changed, lookahead, no_update, rm, alimit;
        struct info     *info;
        unsigned long   entry_eip;
};

union i387_union {
        struct i387_fsave_struct        fsave;
        struct i387_fxsave_struct       fxsave;
        struct i387_soft_struct soft;
};

typedef struct {
        unsigned long seg;
} mm_segment_t;

struct tss_struct {
        unsigned short  back_link,__blh;
        unsigned long   esp0;
        unsigned short  ss0,__ss0h;
        unsigned long   esp1;
        unsigned short  ss1,__ss1h;     /* ss1 is used to cache MSR_IA32_SYSENTER_CS */
        unsigned long   esp2;
        unsigned short  ss2,__ss2h;
        unsigned long   __cr3;
        unsigned long   eip;
        unsigned long   eflags;
        unsigned long   eax,ecx,edx,ebx;
        unsigned long   esp;
        unsigned long   ebp;
        unsigned long   esi;
        unsigned long   edi;
        unsigned short  es, __esh;
        unsigned short  cs, __csh;
        unsigned short  ss, __ssh;
        unsigned short  ds, __dsh;
        unsigned short  fs, __fsh;
        unsigned short  gs, __gsh;
        unsigned short  ldt, __ldth;
        unsigned short  trace, io_bitmap_base;
        /*
         * The extra 1 is there because the CPU will access an
         * additional byte beyond the end of the IO permission
         * bitmap. The extra byte must be all 1 bits, and must
         * be within the limit.
         */
        unsigned long   io_bitmap[IO_BITMAP_LONGS + 1];
        /*
         * pads the TSS to be cacheline-aligned (size is 0x100)
         */
        unsigned long __cacheline_filler[5];
        /*
         * .. and then another 0x100 bytes for emergency kernel stack
         */
        unsigned long stack[64];
} __attribute__((packed));

struct thread_struct {
/* cached TLS descriptors. */
        struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
        unsigned long   esp0;
        unsigned long   eip;
        unsigned long   esp;
        unsigned long   fs;
        unsigned long   gs;
/* Hardware debugging registers */
        unsigned long   debugreg[8];  /* %%db0-7 debug registers */
/* fault info */
        unsigned long   cr2, trap_no, error_code;
/* floating point info */
        union i387_union        i387;
/* virtual 86 mode info */
        struct vm86_struct __user * vm86_info;
        unsigned long           screen_bitmap;
        unsigned long           v86flags, v86mask, saved_esp0;
        unsigned int            saved_fs, saved_gs;
/* IO permissions */
        unsigned long   *io_bitmap_ptr;
};

#define INIT_THREAD  {                                                  \
        .vm86_info = NULL,                                              \
        .io_bitmap_ptr = NULL,                                          \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {                                                     \
        .esp0           = sizeof(init_stack) + (long)&init_stack,       \
        .ss0            = __KERNEL_DS,                                  \
        .esp1           = sizeof(init_tss[0]) + (long)&init_tss[0],     \
        .ss1            = __KERNEL_CS,                                  \
        .ldt            = GDT_ENTRY_LDT,                                \
        .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,                     \
        .io_bitmap      = { [ 0 ... IO_BITMAP_LONGS] = ~0 },            \
}

static inline void load_esp0(struct tss_struct *tss, unsigned long esp0)
{
        tss->esp0 = esp0;
        /* This can only happen when SEP is enabled, no need to test "SEP"arately */
        if (unlikely(tss->ss1 != __KERNEL_CS)) {
                tss->ss1 = __KERNEL_CS;
                wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
        }
}

static inline void disable_sysenter(struct tss_struct *tss)
{
        if (cpu_has_sep) {
                tss->ss1 = 0;
                wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
        }
}

#define start_thread(regs, new_eip, new_esp) do {               \
        __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));       \
        set_fs(USER_DS);                                        \
        regs->xds = __USER_DS;                                  \
        regs->xes = __USER_DS;                                  \
        regs->xss = __USER_DS;                                  \
        regs->xcs = __USER_CS;                                  \
        regs->eip = new_eip;                                    \
        regs->esp = new_esp;                                    \
} while (0)

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

extern unsigned long thread_saved_pc(struct task_struct *tsk);
void show_trace(struct task_struct *task, unsigned long *stack);

unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk)   (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1019])
#define KSTK_ESP(tsk)   (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1022])
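
/*
 * Explanatory note, assuming the standard two-page (8KB) kernel stack
 * with thread_info at its base: 4096 bytes past thread_info selects the
 * second page, and word indexes 1019 and 1022 within it land on the eip
 * and esp slots of the pt_regs frame saved at the top of the stack.
 */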

struct microcode_header {
        unsigned int hdrver;
        unsigned int rev;
        unsigned int date;
        unsigned int sig;
        unsigned int cksum;
        unsigned int ldrver;
        unsigned int pf;
        unsigned int datasize;
        unsigned int totalsize;
        unsigned int reserved[3];
};

struct microcode {
        struct microcode_header hdr;
        unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from Prescott processors */
struct extended_signature {
        unsigned int sig;
        unsigned int pf;
        unsigned int cksum;
};

struct extended_sigtable {
        unsigned int count;
        unsigned int cksum;
        unsigned int reserved[3];
        struct extended_signature sigs[0];
};

/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
#define MICROCODE_IOCFREE       _IO('6',0)

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
        __asm__ __volatile__("rep;nop": : :"memory");
}

#define cpu_relax()     rep_nop()
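
/*
 * Usage sketch (illustrative; "ready" is a hypothetical flag): relaxing
 * a polling loop with cpu_relax(). PAUSE hints to the CPU that this is
 * a spin-wait, which saves power and avoids the memory-order
 * mis-speculation penalty when the wait ends.
 *
 *      while (!ready)
 *              cpu_relax();
 */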

/* generic versions from gas */
#define GENERIC_NOP1    ".byte 0x90\n"
#define GENERIC_NOP2    ".byte 0x89,0xf6\n"
#define GENERIC_NOP3    ".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4    ".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5    GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6    ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7    ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8    GENERIC_NOP1 GENERIC_NOP7

/* Opteron nops */
#define K8_NOP1 GENERIC_NOP1
#define K8_NOP2 ".byte 0x66,0x90\n"
#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5 K8_NOP3 K8_NOP2
#define K8_NOP6 K8_NOP3 K8_NOP3
#define K8_NOP7 K8_NOP4 K8_NOP3
#define K8_NOP8 K8_NOP4 K8_NOP4

/* K7 nops */
/* uses eax dependencies (arbitrary choice) */
#define K7_NOP1 GENERIC_NOP1
#define K7_NOP2 ".byte 0x8b,0xc0\n"
#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5 K7_NOP4 ASM_NOP1
#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
#define K7_NOP8 K7_NOP7 ASM_NOP1

#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

#define ASM_NOP_MAX 8

/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6,
   because they are microcoded there and very slow.
   However, we currently don't do prefetches for pre-XP Athlons;
   that should be fixed. */
#define ARCH_HAS_PREFETCH
extern inline void prefetch(const void *x)
{
        alternative_input(ASM_NOP4,
                          "prefetchnta (%1)",
                          X86_FEATURE_XMM,
                          "r" (x));
}

#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
extern inline void prefetchw(const void *x)
{
        alternative_input(ASM_NOP4,
                          "prefetchw (%1)",
                          X86_FEATURE_3DNOW,
                          "r" (x));
}
#define spin_lock_prefetch(x)   prefetchw(x)
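
/*
 * Usage sketch (illustrative; "node" and do_something() are hypothetical):
 * prefetching the next list node overlaps its memory latency with the
 * work done on the current one.
 *
 *      for (p = head; p; p = p->next) {
 *              prefetch(p->next);
 *              do_something(p);
 *      }
 */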

extern void select_idle_routine(const struct cpuinfo_x86 *c);

#endif /* __ASM_I386_PROCESSOR_H */