#include <linuxcomp.h>

#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#include "cpu.h"

/* Added by Nino - Begin */

//#define __CPU_DEBUG__

/*!!! DEBUG */
unsigned long cpu_khz = 1000000;
/*!!! DEBUG */

int disable_pse __initdata = 0;

/*
 * Machine setup..
 */

/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };

unsigned long mmu_cr4_features;

int tsc_disable __initdata = 0; /* TODO */

/* Added by Nino - END */

static int cachesize_override __initdata = -1;
static int disable_x86_fxsr __initdata = 0;
static int disable_x86_serial_nr __initdata = 1;

struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};

extern void mcheck_init(struct cpuinfo_x86 *c);

extern int disable_pse;

static void default_init(struct cpuinfo_x86 * c)
{
        /* Not much we can do here... */
        /* Check if at least it has cpuid */
        if (c->cpuid_level == -1) {
                /* No cpuid. It must be an ancient CPU */
                if (c->x86 == 4)
                        strcpy(c->x86_model_id, "486");
                else if (c->x86 == 3)
                        strcpy(c->x86_model_id, "386");
        }
}

static struct cpu_dev default_cpu = {
        .c_init = default_init,
};
static struct cpu_dev * this_cpu = &default_cpu;

static int __init cachesize_setup(char *str)
{
        get_option (&str, &cachesize_override);
        return 1;
}
__setup("cachesize=", cachesize_setup);

int __init get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;
        char *p, *q;

        if (cpuid_eax(0x80000000) < 0x80000004)
                return 0;

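        /* Leaves 0x80000002..0x80000004 each return 16 bytes of the 48-byte
           brand string in EAX:EBX:ECX:EDX, so the three cpuid calls below
           fill c->x86_model_id back to back. */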
        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;

        /* Intel chips right-justify this string for some dumb reason;
           undo that brain damage */
        p = q = &c->x86_model_id[0];
        while ( *p == ' ' )
             p++;
        if ( p != q ) {
             while ( *p )
                  *q++ = *p++;
             while ( q <= &c->x86_model_id[48] )
                  *q++ = '\0';  /* Zero-pad the rest */
        }

        return 1;
}


void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, ecx, edx, l2size;

        n = cpuid_eax(0x80000000);

        if (n >= 0x80000005) {
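                /* AMD leaf 0x80000005: EDX describes the L1 instruction
                   cache, ECX the L1 data cache; bits 31..24 hold the size
                   in KB, bits 7..0 the line size. */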
                cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                        edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size=(ecx>>24)+(edx>>24);
        }

        if (n < 0x80000006)     // Some chips just have a large L1.
                return;

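        /* AMD leaf 0x80000006: ECX bits 31..16 give the L2 size in KB,
           bits 7..0 the line size. */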
        ecx = cpuid_ecx(0x80000006);
        l2size = ecx >> 16;

        /* do processor-specific cache resizing */
        if (this_cpu->c_size_cache)
                l2size = this_cpu->c_size_cache(c,l2size);

        /* Allow user to override all this if necessary. */
        if (cachesize_override != -1)
                l2size = cachesize_override;

        if ( l2size == 0 )
                return;         // Again, no L2 cache is possible

        c->x86_cache_size = l2size;

        printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
               l2size, ecx & 0xFF);
}

/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set the name; */
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */

/* Look up CPU names by table lookup. */
static char __init *table_lookup_model(struct cpuinfo_x86 *c)
{
        struct cpu_model_info *info;

        if ( c->x86_model >= 16 )
                return NULL;    /* Range check */

        if (!this_cpu)
                return NULL;

        info = this_cpu->c_models;

        while (info && info->family) {
                if (info->family == c->x86)
                        return info->model_names[c->x86_model];
                info++;
        }
        return NULL;            /* Not found */
}


void __init get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;
        int i;

        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (cpu_devs[i]) {
                        if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
                            (cpu_devs[i]->c_ident[1] &&
                             !strcmp(v,cpu_devs[i]->c_ident[1]))) {
                                c->x86_vendor = i;
                                this_cpu = cpu_devs[i];
                                break;
                        }
                }
        }
}


static int __init x86_fxsr_setup(char * s)
{
        disable_x86_fxsr = 1;
        return 1;
}
__setup("nofxsr", x86_fxsr_setup);


/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
        u32 f1, f2;

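        /* Save EFLAGS, flip the requested bit, write it back, then read
           EFLAGS again.  If the bit stayed flipped, software can change
           it -- the classic AC/ID-flag CPU detection trick. */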
        asm("pushfl\n\t"
            "pushfl\n\t"
            "popl %0\n\t"
            "movl %0,%1\n\t"
            "xorl %2,%0\n\t"
            "pushl %0\n\t"
            "popfl\n\t"
            "pushfl\n\t"
            "popl %0\n\t"
            "popfl\n\t"
            : "=&r" (f1), "=&r" (f2)
            : "ir" (flag));

        return ((f1^f2) & flag) != 0;
}


/* Probe for the CPUID instruction */
int __init have_cpuid_p(void)
{
        return flag_is_changeable_p(X86_EFLAGS_ID);
}

void __init generic_identify(struct cpuinfo_x86 * c)
{
        u32 tfms, xlvl;
        int junk;

        if (have_cpuid_p()) {
                /* Get vendor name */
                cpuid(0x00000000, &c->cpuid_level,
                      (int *)&c->x86_vendor_id[0],
                      (int *)&c->x86_vendor_id[8],
                      (int *)&c->x86_vendor_id[4]);
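                /* Leaf 0 returns the 12-byte vendor string in EBX:EDX:ECX
                   order ("Genu" "ineI" "ntel"), which is why EBX lands at
                   offset 0, EDX at offset 4 and ECX at offset 8 above. */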

                get_cpu_vendor(c);
                /* Initialize the standard set of capabilities */
                /* Note that the vendor-specific code below might override */

                /* Intel-defined flags: level 0x00000001 */
                if ( c->cpuid_level >= 0x00000001 ) {
                        u32 capability, excap;
                        cpuid(0x00000001, &tfms, &junk, &excap, &capability);
                        c->x86_capability[0] = capability;
                        c->x86_capability[4] = excap;
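                        /* EAX signature layout: stepping in bits 3..0,
                           model in bits 7..4, family in bits 11..8.  The
                           extended model (bits 19..16) and extended family
                           (bits 27..20) only apply when the base family
                           is 0xf. */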
                        c->x86 = (tfms >> 8) & 15;
                        c->x86_model = (tfms >> 4) & 15;
                        if (c->x86 == 0xf) {
                                c->x86 += (tfms >> 20) & 0xff;
                                c->x86_model += ((tfms >> 16) & 0xF) << 4;
                        }
                        c->x86_mask = tfms & 15;
                } else {
                        /* Have CPUID level 0 only - unheard of */
                        c->x86 = 4;
                }

                /* AMD-defined flags: level 0x80000001 */
                xlvl = cpuid_eax(0x80000000);
                if ( (xlvl & 0xffff0000) == 0x80000000 ) {
                        if ( xlvl >= 0x80000001 )
                                c->x86_capability[1] = cpuid_edx(0x80000001);
                        if ( xlvl >= 0x80000004 )
                                get_model_name(c); /* Default name */
                }
        }
}

static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
                /* Disable processor serial number */
                unsigned long lo,hi;
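                /* On the Pentium III, setting bit 21 of MSR 0x119
                   (MSR_IA32_BBL_CR_CTL) disables the processor serial
                   number until the next reset. */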
                rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
                lo |= 0x200000;
                wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
                printk(KERN_NOTICE "CPU serial number disabled.\n");
                clear_bit(X86_FEATURE_PN, c->x86_capability);

                /* Disabling the serial number may affect the cpuid level */
                c->cpuid_level = cpuid_eax(0);
        }
}

static int __init x86_serial_nr_setup(char *s)
{
        disable_x86_serial_nr = 0;
        return 1;
}
__setup("serialnumber", x86_serial_nr_setup);


/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        c->loops_per_jiffy = 1; //!!!loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->cpuid_level = -1;    /* CPUID not detected */
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        if (!have_cpuid_p()) {
                /* First of all, decide if this is a 486 or higher */
                /* It's a 486 if we can modify the AC flag */
                if ( flag_is_changeable_p(X86_EFLAGS_AC) )
                        c->x86 = 4;
                else
                        c->x86 = 3;
        }

        generic_identify(c);

#ifdef __CPU_DEBUG__
        printk(KERN_DEBUG "CPU:     After generic identify, caps: %08lx %08lx %08lx %08lx\n",
                c->x86_capability[0],
                c->x86_capability[1],
                c->x86_capability[2],
                c->x86_capability[3]);
#endif

        if (this_cpu->c_identify) {
                this_cpu->c_identify(c);

#ifdef __CPU_DEBUG__
                printk(KERN_DEBUG "CPU:     After vendor identify, caps: %08lx %08lx %08lx %08lx\n",
                        c->x86_capability[0],
                        c->x86_capability[1],
                        c->x86_capability[2],
                        c->x86_capability[3]);
#endif
        }

        /*
         * Vendor-specific initialization.  In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);

        /* Disable the PN if appropriate */
        squash_the_stupid_serial_number(c);

        /*
         * The vendor-specific functions might have changed features.  Now
         * we do "generic changes."
         */

        /* TSC disabled? */
        /*!!!if ( tsc_disable )
                clear_bit(X86_FEATURE_TSC, c->x86_capability);*/

        /* FXSR disabled? */
        if (disable_x86_fxsr) {
                clear_bit(X86_FEATURE_FXSR, c->x86_capability);
                clear_bit(X86_FEATURE_XMM, c->x86_capability);
        }

        if (disable_pse)
                clear_bit(X86_FEATURE_PSE, c->x86_capability);

        /* If the model name is still unset, do table lookup. */
        if ( !c->x86_model_id[0] ) {
                char *p;
                p = table_lookup_model(c);
                if ( p )
                        strcpy(c->x86_model_id, p);
                else
                        /* Last resort... */
                        sprintf26(c->x86_model_id, "%02x/%02x",
                                c->x86_vendor, c->x86_model);
        }

        /* Now the feature flags better reflect actual CPU features! */

#ifdef __CPU_DEBUG__
        printk(KERN_DEBUG "CPU:     After all inits, caps: %08lx %08lx %08lx %08lx\n",
               c->x86_capability[0],
               c->x86_capability[1],
               c->x86_capability[2],
               c->x86_capability[3]);
#endif

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if ( c != &boot_cpu_data ) {
                // AND the already accumulated flags with these
                for ( i = 0 ; i < NCAPINTS ; i++ )
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Init Machine Check Exception if available. */
#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
}

/*
 *      Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
 */

void __init dodgy_tsc(void)
{
        get_cpu_vendor(&boot_cpu_data);
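        /* NSC Geode parts are handled by the Cyrix code, so both vendors
           funnel through the X86_VENDOR_CYRIX init routine here. */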
        if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
            ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC   ))
                cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
}

void __init print_cpu_info(struct cpuinfo_x86 *c)
{
        char *vendor = NULL;

        if (c->x86_vendor < X86_VENDOR_NUM)
                vendor = this_cpu->c_vendor;
        else if (c->cpuid_level >= 0)
                vendor = c->x86_vendor_id;

        if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
                printk("%s ", vendor);

        if (!c->x86_model_id[0])
                printk("%d86", c->x86);
        else
                printk("%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(" stepping %02x\n", c->x86_mask);
        else
                printk("\n");
}

unsigned long cpu_initialized __initdata = 0;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */

extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);

void __init early_cpu_init(void)
{
        intel_cpu_init();
        cyrix_init_cpu();
        nsc_init_cpu();
        amd_init_cpu();

#ifdef CONFIG_DEBUG_PAGEALLOC
        /* pse is not compatible with on-the-fly unmapping,
         * disable it even if the cpus claim to support it.
         */
        clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
        disable_pse = 1;
#endif
}
/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless; this function acts as a
 * 'CPU state barrier': nothing should get across.
 */
void __init cpu_init (void)
{
        int cpu = smp_processor_id();
        /*!!!struct tss_struct * t = init_tss + cpu;
        struct thread_struct *thread = &current->thread;*/

        if (test_and_set_bit(cpu, &cpu_initialized)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
                for (;;) local_irq_enable();
        }
        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
                clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
        if (tsc_disable && cpu_has_tsc) {
                printk(KERN_NOTICE "Disabling TSC...\n");
                /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
                clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
                set_in_cr4(X86_CR4_TSD);
        }

        /*
         * Initialize the per-CPU GDT with the boot GDT,
         * and set up the GDT descriptor:
         */
        /*!!!if (cpu) {
                memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
                cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
                cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
        }*/
        /*
         * Set up the per-thread TLS descriptor cache:
         */
        /*!!!memcpy(thread->tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);

        __asm__ __volatile__("lgdt %0" : : "m" (cpu_gdt_descr[cpu]));
        __asm__ __volatile__("lidt %0" : : "m" (idt_descr));*/

        /*
         * Delete NT
         */
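        /* 0xffffbfff clears bit 14 of EFLAGS, the NT (nested task) flag,
           so a later iret cannot attempt a hardware task switch. */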
        __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");

        /*
         * Set up and load the per-CPU TSS and LDT
         */
        /*!!!atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        if (current->mm)
                BUG();
        enter_lazy_tlb(&init_mm, current);

        load_esp0(t, thread->esp0);
        set_tss_desc(cpu,t);
        cpu_gdt_table[cpu][GDT_ENTRY_TSS].b &= 0xfffffdff;
        load_TR_desc();
        load_LDT(&init_mm.context);*/

        /* Set up doublefault TSS pointer in the GDT */
        /*!!!__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
        cpu_gdt_table[cpu][GDT_ENTRY_DOUBLEFAULT_TSS].b &= 0xfffffdff;*/

        /* Clear %fs and %gs. */
        asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");

        /* Clear all 6 debug registers: */

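        /* %db0-%db3 are the breakpoint address registers, %db6 is status
           and %db7 is control; db4 and db5 do not exist as architectural
           registers, hence the gap below. */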
556
#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );
557
 
558
        CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
559
 
560
#undef CD
561
 
562
        /*
563
         * Force FPU initialization:
564
         */
565
        current_thread_info()->status = 0;
566
        current->used_math = 0;
567
        stts();
568
}
569
 
570
/* Added by Nino - Begin */
571
 
572
void identify_cpu_0(void)
573
{
574
        identify_cpu(&new_cpu_data);
575
}
576
 
577
void print_cpu_info_0(void)
578
{
579
        print_cpu_info(&new_cpu_data);
580
}
581
 
582
/* Added by Nino - End */