/* ==== File: /shark/trunk/drivers/cpu/cyrix.c (new file, 437 lines) ==== */
#include <linuxcomp.h> |
#include <linux/init.h> |
#include <linux/bitops.h> |
#include <linux/delay.h> |
#include <linux/pci.h> |
#include <asm/dma.h> |
#include <asm/io.h> |
#include <asm/processor.h> |
#include <asm/timer.h> |
#include "cpu.h" |
/* |
* Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU |
*/ |
void __init do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) |
{ |
unsigned char ccr2, ccr3; |
unsigned long flags; |
/* we test for DEVID by checking whether CCR3 is writable */ |
local_irq_save(flags); |
ccr3 = getCx86(CX86_CCR3); |
setCx86(CX86_CCR3, ccr3 ^ 0x80); |
getCx86(0xc0); /* dummy to change bus */ |
if (getCx86(CX86_CCR3) == ccr3) { /* no DEVID regs. */ |
ccr2 = getCx86(CX86_CCR2); |
setCx86(CX86_CCR2, ccr2 ^ 0x04); |
getCx86(0xc0); /* dummy */ |
if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */ |
*dir0 = 0xfd; |
else { /* Cx486S A step */ |
setCx86(CX86_CCR2, ccr2); |
*dir0 = 0xfe; |
} |
} |
else { |
setCx86(CX86_CCR3, ccr3); /* restore CCR3 */ |
/* read DIR0 and DIR1 CPU registers */ |
*dir0 = getCx86(CX86_DIR0); |
*dir1 = getCx86(CX86_DIR1); |
} |
local_irq_restore(flags); |
} |
/*
 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
 * order to identify the Cyrix CPU model after we're out of setup.c
 *
 * Actually since bugs.h doesn't even reference this perhaps someone should
 * fix the documentation ???
 */
/* Cached high nibble of DIR0; identifies the CPU "family" (see init_cyrix). */
static unsigned char Cx86_dir0_msb __initdata = 0;
/* Family display names, indexed by DIR0 high nibble clamped to 0..7. */
static char Cx86_model[][9] __initdata = {
    "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
    "M II ", "Unknown"
};
/* Cx486SLC/DLC/SRx/DRx model suffixes, indexed by (DIR0 low nibble & 7). */
static char Cx486_name[][5] __initdata = {
    "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
    "SRx2", "DRx2"
};
/* Cx486S model suffixes, indexed by (DIR0 low nibble & 3). */
static char Cx486S_name[][4] __initdata = {
    "S", "S2", "Se", "S2e"
};
/* Cx486DX model suffixes, indexed by (DIR0 low nibble & 5). */
static char Cx486D_name[][4] __initdata = {
    "DX", "DX2", "?", "?", "?", "DX4"
};
/* Scratch "N.5x Core/Bus Clock" string; chars [0..2] are patched in place. */
static char Cx86_cb[] __initdata = "?.5x Core/Bus Clock";
/* Clock-multiplier digit lookup tables, indexed by DIR0 low-nibble bits. */
static char cyrix_model_mult1[] __initdata = "12??43";
static char cyrix_model_mult2[] __initdata = "12233445";
/*
 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
 * BIOSes for compatibility with DOS games. This makes the udelay loop
 * work correctly, and improves performance.
 *
 * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
 */
extern void calibrate_delay(void) __init;
static void __init check_cx686_slop(struct cpuinfo_x86 *c)
{
    unsigned long flags;
    /* Only the 6x86 family (DIR0 high nibble == 3) carries the SLOP bit */
    if (Cx86_dir0_msb == 3) {
        unsigned char ccr3, ccr5;
        local_irq_save(flags);
        ccr3 = getCx86(CX86_CCR3);
        setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
        ccr5 = getCx86(CX86_CCR5);
        if (ccr5 & 2)
            setCx86(CX86_CCR5, ccr5 & 0xfd); /* reset SLOP */
        setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
        local_irq_restore(flags);
        /* Delay-loop recalibration is disabled in this S.Ha.R.K. port:
        if (ccr5 & 2) { // possible wrong calibration done
        printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
        //!!!calibrate_delay();
        c->loops_per_jiffy = loops_per_jiffy;
        }*/
    }
}
/*
 * Allow the CPU to reorder memory accesses: clear the Load/Store
 * Serialize bit (0x80) in PCR0 and widen the serialized region via the
 * top bits of CCR3.  PCR0 is only reachable while MAPEN is set in CCR3.
 */
static void __init set_cx86_reorder(void)
{
    u8 ccr3;
    printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n");
    ccr3 = getCx86(CX86_CCR3);
    setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
    /* Load/Store Serialize to mem access disable (=reorder it) */
    setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
    /* set load/store serialize from 1GB to 4GB */
    ccr3 |= 0xe0;
    /* Writes the saved CCR3 back (with bits 5-7 set), which also
       returns MAPEN to its original state. */
    setCx86(CX86_CCR3, ccr3);
}
/*
 * Enable write-back caching: unlock the NW bit via CCR2, OR 0x20000000
 * (CR0 bit 29, "Not Write-through") into CR0 with inline asm, then
 * re-lock NW and set WT1 in CCR2.
 */
static void __init set_cx86_memwb(void)
{
    u32 cr0;
    printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
    /* CCR2 bit 2: unlock NW bit */
    setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
    /* set 'Not Write-through' */
    cr0 = 0x20000000;
    /* OR cr0 mask into the real CR0; clobbers %eax only */
    __asm__("movl %%cr0,%%eax\n\t"
        "orl %0,%%eax\n\t"
        "movl %%eax,%%cr0\n"
        : : "r" (cr0)
        :"ax");
    /* CCR2 bit 2: lock NW bit and set WT1 */
    setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 );
}
/*
 * Enable the "Incrementor" performance feature via the PCR0/PCR1
 * performance-control registers (reachable only while MAPEN is set).
 */
static void __init set_cx86_inc(void)
{
    unsigned char ccr3;
    printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");
    ccr3 = getCx86(CX86_CCR3);
    setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
    /* PCR1 -- Performance Control */
    /* Incrementor on, whatever that is */
    setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
    /* PCR0 -- Performance Control */
    /* Incrementor Margin 10 */
    setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
    setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
}
/* |
* Configure later MediaGX and/or Geode processor. |
*/ |
static void __init geode_configure(void) |
{ |
unsigned long flags; |
u8 ccr3, ccr4; |
local_irq_save(flags); |
ccr3 = getCx86(CX86_CCR3); |
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* Enable */ |
ccr4 = getCx86(CX86_CCR4); |
ccr4 |= 0x38; /* FPU fast, DTE cache, Mem bypass */ |
setCx86(CX86_CCR3, ccr3); |
set_cx86_memwb(); |
set_cx86_reorder(); |
set_cx86_inc(); |
local_irq_restore(flags); |
} |
/*
 * Main Cyrix/NSC CPU setup: decode DIR0/DIR1 into family, model,
 * stepping and a human-readable model string in c->x86_model_id,
 * and enable family-specific features (ARR-based MTRR emulation,
 * cxMMX, Geode configuration) along the way.
 */
static void __init init_cyrix(struct cpuinfo_x86 *c)
{
    unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
    char *buf = c->x86_model_id;
    const char *p = NULL;
    /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
       3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
    clear_bit(0*32+31, c->x86_capability);
    /* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
    if ( test_bit(1*32+24, c->x86_capability) ) {
        clear_bit(1*32+24, c->x86_capability);
        set_bit(X86_FEATURE_CXMMX, c->x86_capability);
    }
    do_cyrix_devid(&dir0, &dir1);
    check_cx686_slop(c);
    Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family" */
    dir0_lsn = dir0 & 0xf; /* model or clock multiplier */
    /* common case step number/rev -- exceptions handled below */
    c->x86_model = (dir1 >> 4) + 1;
    c->x86_mask = dir1 & 0xf;
    /* Now cook; the original recipe is by Channing Corn, from Cyrix.
     * We do the same thing for each generation: we work out
     * the model, multiplier and stepping. Black magic included,
     * to make the silicon step/rev numbers match the printed ones.
     */
    switch (dir0_msn) {
        unsigned char tmp;
    case 0: /* Cx486SLC/DLC/SRx/DRx */
        p = Cx486_name[dir0_lsn & 7];
        break;
    case 1: /* Cx486S/DX/DX2/DX4 */
        /* bit 3 of the low nibble selects the DX vs. S table */
        p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
            : Cx486S_name[dir0_lsn & 3];
        break;
    case 2: /* 5x86 */
        /* patch the multiplier digit into the "?.5x ..." scratch string */
        Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
        p = Cx86_cb+2;
        break;
    case 3: /* 6x86/6x86L */
        Cx86_cb[1] = ' ';
        Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
        if (dir1 > 0x21) { /* 686L */
            Cx86_cb[0] = 'L';
            p = Cx86_cb;
            (c->x86_model)++;
        } else /* 686 */
            p = Cx86_cb+1;
        /* Emulate MTRRs using Cyrix's ARRs. */
        set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
        /* 6x86's contain this bug */
        c->coma_bug = 1;
        break;
    case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
#ifdef CONFIG_PCI
        /* It isn't really a PCI quirk directly, but the cure is the
           same. The MediaGX has deep magic SMM stuff that handles the
           SB emulation. It thows away the fifo on disable_dma() which
           is wrong and ruins the audio.
           Bug2: VSA1 has a wrap bug so that using maximum sized DMA
           causes bad things. According to NatSemi VSA2 has another
           bug to do with 'hlt'. I've not seen any boards using VSA2
           and X doesn't seem to support it either so who cares 8).
           VSA1 we work around however.
        */
        printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
        isa_dma_bridge_buggy = 2;
#endif
        c->x86_cache_size=16; /* Yep 16K integrated cache thats it */
        /*
         * The 5510/5520 companion chips have a funky PIT.
         * (PIT workaround disabled in this S.Ha.R.K. port:)
         */
        /*!!!if (pci_find_device(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, NULL) ||
            pci_find_device(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, NULL))
            pit_latch_buggy = 1;*/
        /* GXm supports extended cpuid levels 'ala' AMD */
        if (c->cpuid_level == 2) {
            /* Enable cxMMX extensions (GX1 Datasheet 54) */
            setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
            /* GXlv/GXm/GX1 */
            if((dir1 >= 0x50 && dir1 <= 0x54) || dir1 >= 0x63)
                geode_configure();
            get_model_name(c); /* get CPU marketing name */
            /* Debug trace ("GX CPU found", left in Italian by the port) */
            printk("Trovato GX CPU!!!.\n");
            return;
        }
        else { /* MediaGX */
            Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
            p = Cx86_cb+2;
            c->x86_model = (dir1 & 0x20) ? 1 : 2;
        }
        break;
    case 5: /* 6x86MX/M II */
        if (dir1 > 7)
        {
            dir0_msn++; /* M II */
            /* Enable MMX extensions (App note 108) */
            setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
        }
        else
        {
            c->coma_bug = 1; /* 6x86MX, it has the bug. */
        }
        /* pick where in the scratch string the multiplier digit goes */
        tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
        Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
        p = Cx86_cb+tmp;
        if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
            (c->x86_model)++;
        /* Emulate MTRRs using Cyrix's ARRs. */
        set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
        break;
    case 0xf: /* Cyrix 486 without DEVID registers */
        switch (dir0_lsn) {
        case 0xd: /* either a 486SLC or DLC w/o DEVID */
            dir0_msn = 0;
            p = Cx486_name[(c->hard_math) ? 1 : 0];
            break;
        case 0xe: /* a 486S A step */
            dir0_msn = 0;
            p = Cx486S_name[0];
            break;
        }
        break;
    default: /* unknown (shouldn't happen, we know everyone ;-) */
        dir0_msn = 7;
        break;
    }
    /* compose "<family name><model suffix>" into c->x86_model_id */
    strcpy(buf, Cx86_model[dir0_msn & 7]);
    if (p) strcat(buf, p);
    /* Debug trace ("CPU found", left in Italian by the port) */
    printk("Trovato CPU!!!.\n");
    return;
}
/*
 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
 * by the fact that they preserve the flags across the division of 5/2.
 * PII and PPro exhibit this behavior too, but they have cpuid available.
 */
/*
 * Perform the Cyrix 5/2 test. A Cyrix won't change
 * the flags, while other 486 chips will.
 * Returns non-zero when the flag-preserving (Cyrix) behaviour is seen.
 */
static inline int test_cyrix_52div(void)
{
    unsigned int test;
    __asm__ __volatile__(
       "sahf\n\t" /* clear flags (%eax = 0x0005) */
       "div %b2\n\t" /* divide 5 by 2 */
       "lahf" /* store flags into %ah */
       : "=a" (test)
       : "0" (5), "q" (2)
       : "cc");
    /* AH is 0x02 on Cyrix after the divide.. */
    return (unsigned char) (test >> 8) == 0x02;
}
/*
 * Identify a Cyrix CPU that may boot with CPUID disabled: family-4
 * parts are recognised via the 5/2 flag test, then CPUID is switched
 * on (CCR4 bit 7) for the affected DIR0 families (3 and 5) before
 * delegating to the generic identification path.
 */
static void cyrix_identify(struct cpuinfo_x86 * c)
{
    /* Detect Cyrix with disabled CPUID */
    if ( c->x86 == 4 && test_cyrix_52div() ) {
        unsigned char dir0, dir1;
        strcpy(c->x86_vendor_id, "CyrixInstead");
        c->x86_vendor = X86_VENDOR_CYRIX;
        /* Actually enable cpuid on the older cyrix */
        /* Retrieve CPU revisions (dir1 is fetched but not used here) */
        do_cyrix_devid(&dir0, &dir1);
        dir0>>=4; /* keep only the family nibble */
        /* Check it is an affected model */
        if (dir0 == 5 || dir0 == 3)
        {
            unsigned char ccr3, ccr4;
            unsigned long flags;
            printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
            local_irq_save(flags);
            ccr3 = getCx86(CX86_CCR3);
            setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
            ccr4 = getCx86(CX86_CCR4);
            setCx86(CX86_CCR4, ccr4 | 0x80); /* enable cpuid */
            setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
            local_irq_restore(flags);
        }
    }
    generic_identify(c);
}
/* Vendor descriptor: ties the "CyrixInstead" CPUID string to the
 * Cyrix init/identify routines above. */
static struct cpu_dev cyrix_cpu_dev __initdata = {
    .c_vendor = "Cyrix",
    .c_ident = { "CyrixInstead" },
    .c_init = init_cyrix,
    .c_identify = cyrix_identify,
};
/* Register the Cyrix vendor descriptor; always succeeds. */
int __init cyrix_init_cpu(void)
{
    cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
    return 0;
}
/* Called explicitly by the S.Ha.R.K. glue layer instead of an initcall: */
//early_arch_initcall(cyrix_init_cpu);
/* NSC Geode shares the Cyrix init path but uses generic identification. */
static struct cpu_dev nsc_cpu_dev __initdata = {
    .c_vendor = "NSC",
    .c_ident = { "Geode by NSC" },
    .c_init = init_cyrix,
    .c_identify = generic_identify,
};
/* Register the NSC vendor descriptor; always succeeds. */
int __init nsc_init_cpu(void)
{
    cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev;
    return 0;
}
/* Called explicitly by the S.Ha.R.K. glue layer instead of an initcall: */
//early_arch_initcall(nsc_init_cpu);
/* ==== File: /shark/trunk/drivers/cpu/amd.c (new file, 234 lines) ==== */
#include <linuxcomp.h> |
#include <linux/init.h> |
#include <linux/bitops.h> |
#include <linux/mm.h> |
#include <asm/io.h> |
#include <asm/processor.h> |
#include "cpu.h" |
/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * http://www.amd.com/K6/k6docs/revgd.html
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */
/* vide: an empty asm function, used as an indirect-call timing target
 * for the K6 stepping-B bug test below. */
extern void vide(void);
__asm__(".align 4\nvide: ret");
/*
 * Family-specific setup for AMD CPUs (Elan/K5/K6/K7/K8 era):
 * works around the Elan CBAR alias, tests K6 stepping B for the
 * indirect-call erratum, programs K6 write allocation via MSR_K6_WHCR,
 * enables SSE on Palomino-class K7s and fixes CLK_CTL, then sets the
 * K7/K8 feature bits and dumps cache info.
 *
 * Fixes vs. the original: removed a duplicated "AMD K6 stepping B
 * detected - " printk (upstream kept it inside "#if 0" debug guards
 * that were stripped in this port, so the banner printed twice), and
 * dropped the unused local 'r'.
 */
static void __init init_amd(struct cpuinfo_x86 *c)
{
    u32 l, h;
    /* memory size in MB; pinned to 1 in this port
       (//!!!num_physpages >> (20-PAGE_SHIFT);) */
    int mbytes = 1;
    /*
     * FIXME: We should handle the K5 here. Set up the write
     * range and also turn on MSR 83 bits 4 and 31 (write alloc,
     * no bus pipeline)
     */
    /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
       3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
    clear_bit(0*32+31, c->x86_capability);
    get_model_name(c);
    switch(c->x86)
    {
    case 4:
        /*
         * General Systems BIOSen alias the cpu frequency registers
         * of the Elan at 0x000df000. Unfortuantly, one of the Linux
         * drivers subsequently pokes it, and changes the CPU speed.
         * Workaround : Remove the unneeded alias.
         */
#define CBAR (0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB (0x80000000)
#define CBAR_KEY (0X000000CB)
        if (c->x86_model==9 || c->x86_model == 10) {
            if (inl (CBAR) & CBAR_ENB)
                outl (0 | CBAR_KEY, CBAR);
        }
        break;
    case 5:
        if( c->x86_model < 6 )
        {
            /* Based on AMD doc 20734R - June 2000 */
            if ( c->x86_model == 0 ) {
                clear_bit(X86_FEATURE_APIC, c->x86_capability);
                set_bit(X86_FEATURE_PGE, c->x86_capability);
            }
            break;
        }
        if ( c->x86_model == 6 && c->x86_mask == 1 ) {
            const int K6_BUG_LOOP = 1000000;
            int n;
            void (*f_vide)(void);
            unsigned long d, d2;
            printk(KERN_INFO "AMD K6 stepping B detected - ");
            /*
             * It looks like AMD fixed the 2.6.2 bug and improved indirect
             * calls at the same time: time K6_BUG_LOOP indirect calls
             * with the TSC and judge by the cycle count.
             */
            n = K6_BUG_LOOP;
            f_vide = vide;
            rdtscl(d);
            while (n--)
                f_vide();
            rdtscl(d2);
            d = d2-d;
            if (d > 20*K6_BUG_LOOP)
                printk("system stability may be impaired when more than 32 MB are used.\n");
            else
                printk("probably OK (after B9730xxxx).\n");
            printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
        }
        /* K6 with old style WHCR */
        if (c->x86_model < 8 ||
            (c->x86_model== 8 && c->x86_mask < 8)) {
            /* We can only write allocate on the low 508Mb */
            if(mbytes>508)
                mbytes=508;
            rdmsr(MSR_K6_WHCR, l, h);
            if ((l&0x0000FFFF)==0) {
                unsigned long flags;
                l=(1<<0)|((mbytes/4)<<1);
                local_irq_save(flags);
                wbinvd(); /* flush caches before changing WHCR */
                wrmsr(MSR_K6_WHCR, l, h);
                local_irq_restore(flags);
                printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
                    mbytes);
            }
            break;
        }
        if ((c->x86_model == 8 && c->x86_mask >7) ||
             c->x86_model == 9 || c->x86_model == 13) {
            /* The more serious chips .. */
            if(mbytes>4092)
                mbytes=4092;
            rdmsr(MSR_K6_WHCR, l, h);
            if ((l&0xFFFF0000)==0) {
                unsigned long flags;
                l=((mbytes>>2)<<22)|(1<<16);
                local_irq_save(flags);
                wbinvd(); /* flush caches before changing WHCR */
                wrmsr(MSR_K6_WHCR, l, h);
                local_irq_restore(flags);
                printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
                    mbytes);
            }
            /* Set MTRR capability flag if appropriate */
            if (c->x86_model == 13 || c->x86_model == 9 ||
               (c->x86_model == 8 && c->x86_mask >= 8))
                set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
            break;
        }
        break;
    case 6: /* An Athlon/Duron */
        /* Bit 15 of Athlon specific MSR 15, needs to be 0
         * to enable SSE on Palomino/Morgan/Barton CPU's.
         * If the BIOS didn't enable it already, enable it here.
         */
        if (c->x86_model >= 6 && c->x86_model <= 10) {
            if (!cpu_has(c, X86_FEATURE_XMM)) {
                printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
                rdmsr(MSR_K7_HWCR, l, h);
                l &= ~0x00008000;
                wrmsr(MSR_K7_HWCR, l, h);
                set_bit(X86_FEATURE_XMM, c->x86_capability);
            }
        }
        /* It's been determined by AMD that Athlons since model 8 stepping 1
         * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
         * As per AMD technical note 27212 0.2
         */
        if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) {
            rdmsr(MSR_K7_CLK_CTL, l, h);
            if ((l & 0xfff00000) != 0x20000000) {
                printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
                    ((l & 0x000fffff)|0x20000000));
                wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
            }
        }
        break;
    }
    /* Mark the family feature bit for later code. */
    switch (c->x86) {
    case 15:
        set_bit(X86_FEATURE_K8, c->x86_capability);
        break;
    case 6:
        set_bit(X86_FEATURE_K7, c->x86_capability);
        break;
    }
    display_cacheinfo(c);
}
static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size) |
{ |
/* AMD errata T13 (order #21922) */ |
if ((c->x86 == 6)) { |
if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */ |
size = 64; |
if (c->x86_model == 4 && |
(c->x86_mask==0 || c->x86_mask==1)) /* Tbird rev A1/A2 */ |
size = 256; |
} |
return size; |
} |
/* Vendor descriptor for "AuthenticAMD", including the family-4
 * model-name table (indexed by CPUID model number). */
static struct cpu_dev amd_cpu_dev __initdata = {
    .c_vendor = "AMD",
    .c_ident = { "AuthenticAMD" },
    .c_models = {
        { .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
            {
                [3] = "486 DX/2",
                [7] = "486 DX/2-WB",
                [8] = "486 DX/4",
                [9] = "486 DX/4-WB",
                [14] = "Am5x86-WT",
                [15] = "Am5x86-WB"
            }
        },
    },
    .c_init = init_amd,
    .c_identify = generic_identify,
    .c_size_cache = amd_size_cache,
};
/* Register the AMD vendor descriptor; always succeeds. */
int __init amd_init_cpu(void)
{
    cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
    return 0;
}
/* Called explicitly by the S.Ha.R.K. glue layer instead of an initcall: */
//early_arch_initcall(amd_init_cpu);
/* ==== File: /shark/trunk/drivers/cpu/include/drivers/shark_cpu26.h (new file, 44 lines) ==== */
/* |
* Project: S.Ha.R.K. |
* |
* Coordinators: |
* Giorgio Buttazzo <giorgio@sssup.it> |
* Paolo Gai <pj@gandalf.sssup.it> |
* |
* Authors : |
* Mauro Marinoni <mauro.marinoni@unipv.it> |
* |
* |
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy) |
* |
* http://www.sssup.it |
* http://retis.sssup.it |
* http://shark.sssup.it |
*/ |
/* Glue Layer Header Linux CPU Driver*/
#ifndef __SHARK_CPU26_H__
#define __SHARK_CPU26_H__
/* Dynamic Voltage/frequency Scaling backend identifiers, as returned
 * by CPU26_initDVS() -- one per supported cpufreq driver. */
#define DVS_NONE 0
#define DVS_POWERNOW_K6 1
#define DVS_POWERNOW_K7 2
#define DVS_POWERNOW_K8 3
#define DVS_MEDIAGX_GEODE 4
#define DVS_P4_CLOCK_MOD 5
#define DVS_SS_CENTRINO 6       /* SpeedStep variants */
#define DVS_SS_ICH 7
#define DVS_SS_SMI 8
/* Driver lifecycle: installed-check, init/close, info dump. */
int CPU26_installed(void);
int CPU26_init(void);
int CPU26_close(void);
void CPU26_showinfo(void);
/* DVS subsystem init/close (returns one of the DVS_* codes above --
 * presumed from the naming; confirm against the implementation). */
int CPU26_initDVS(void);
int CPU26_closeDVS(void);
#endif
/* ==== File: /shark/trunk/drivers/cpu/cpufreq/powernow-k6.c (new file, 239 lines) ==== */
/* |
* This file was based upon code in Powertweak Linux (http://powertweak.sf.net) |
 * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä, Dominik Brodowski.
* |
* Licensed under the terms of the GNU GPL License version 2. |
* |
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* |
*/ |
#include <linuxcomp.h> |
#include <linux/kernel.h> |
#include <linux/module.h> |
#include <linux/init.h> |
#include <linux/cpufreq.h> |
#include <linux/ioport.h> |
#include <linux/slab.h> |
#include <asm/msr.h> |
#include <asm/timex.h> |
#include <asm/io.h> |
extern struct cpuinfo_x86 new_cpu_data; /* CPU info probed at boot (S.Ha.R.K.) */
extern unsigned long cpu_khz; /* core clock in kHz */
/* The PowerNow! BVC interface is mapped onto this I/O port via
 * MSR_K6_EPMR while a transition is performed. */
#define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long
				  as it is unused */
static unsigned int busfreq; /* FSB, in 10 kHz */
static unsigned int max_multiplier; /* multiplier x10 found at driver init */
/* Clock ratio multiplied by 10 - see table 27 in AMD#23446.
 * Array position is the 3-bit BVC code; .index holds the multiplier x10,
 * .frequency is filled in by powernow_k6_cpu_init(). */
static struct cpufreq_frequency_table clock_ratio[] = {
    {45, /* 000 -> 4.5x */ 0},
    {50, /* 001 -> 5.0x */ 0},
    {40, /* 010 -> 4.0x */ 0},
    {55, /* 011 -> 5.5x */ 0},
    {20, /* 100 -> 2.0x */ 0},
    {30, /* 101 -> 3.0x */ 0},
    {60, /* 110 -> 6.0x */ 0},
    {35, /* 111 -> 3.5x */ 0},
    {0, CPUFREQ_TABLE_END}
};
/**
 * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
 *
 * Returns the current setting of the frequency multiplier (x10). Core
 * clock speed is frequency of the Front-Side Bus multiplied with this
 * value.  The BVC status is read from POWERNOW_IOPORT+0x8 after the
 * port is enabled through MSR_K6_EPMR; bits 5-7 select the table row.
 */
static int powernow_k6_get_cpu_multiplier(void)
{
    u64 invalue = 0;
    u32 msrval;
    msrval = POWERNOW_IOPORT + 0x1;
    wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
    invalue=inl(POWERNOW_IOPORT + 0x8);
    msrval = POWERNOW_IOPORT + 0x0;
    wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
    return clock_ratio[(invalue >> 5)&7].index;
}
/**
 * powernow_k6_set_state - set the PowerNow! multiplier
 * @best_i: clock_ratio[best_i] is the target multiplier
 *
 * Tries to change the PowerNow! multiplier by writing the BVC command
 * word to POWERNOW_IOPORT+0x8 (port enabled/disabled via MSR_K6_EPMR).
 * Refuses targets above max_multiplier.  Transition notifications are
 * stubbed out in this S.Ha.R.K. port.
 */
static void powernow_k6_set_state (unsigned int best_i)
{
    unsigned long outvalue=0, invalue=0;
    unsigned long msrval;
    struct cpufreq_freqs freqs;
    if (clock_ratio[best_i].index > max_multiplier) {
        printk(KERN_ERR "cpufreq: invalid target frequency\n");
        return;
    }
    freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
    freqs.new = busfreq * clock_ratio[best_i].index;
    freqs.cpu = 0; // powernow-k6.c is UP only driver
    //!!!cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
    /* we now need to transform best_i to the BVC format, see AMD#23446 */
    outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5);
    msrval = POWERNOW_IOPORT + 0x1;
    wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
    invalue=inl(POWERNOW_IOPORT + 0x8);
    invalue = invalue & 0xf; /* preserve the low-nibble status bits */
    outvalue = outvalue | invalue;
    outl(outvalue ,(POWERNOW_IOPORT + 0x8));
    msrval = POWERNOW_IOPORT + 0x0;
    wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
    //!!!cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
    return;
}
/**
 * powernow_k6_verify - verifies a new CPUfreq policy
 * @policy: new policy
 *
 * Policy must be within lowest and highest possible CPU Frequency,
 * and at least one possible state must be within min and max.
 * Thin wrapper over the generic frequency-table verifier.
 */
static int powernow_k6_verify(struct cpufreq_policy *policy)
{
    return cpufreq_frequency_table_verify(policy, &clock_ratio[0]);
}
/**
 * powernow_k6_target - sets a new CPUFreq policy
 * @policy: new policy
 * @target_freq: target frequency in kHz
 * @relation: CPUFREQ_RELATION_L or CPUFREQ_RELATION_H
 *
 * Picks the best matching table entry for the requested frequency and
 * programs it.  Returns -EINVAL if no table entry satisfies the request.
 */
static int powernow_k6_target (struct cpufreq_policy *policy,
                   unsigned int target_freq,
                   unsigned int relation)
{
    unsigned int newstate = 0;
    if (cpufreq_frequency_table_target(policy, &clock_ratio[0], target_freq, relation, &newstate))
        return -EINVAL;
    powernow_k6_set_state(newstate);
    return 0;
}
/*
 * Per-CPU cpufreq init (UP only: rejects cpu != 0).  Derives the FSB
 * frequency from cpu_khz and the current multiplier, fills in the
 * frequency column of clock_ratio[], and invalidates entries above the
 * boot multiplier.  Note busfreq ends up in 10 kHz units because the
 * table stores multipliers x10.
 */
static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
{
    unsigned int i;
    if (policy->cpu != 0)
        return -ENODEV;
    /* get frequencies */
    max_multiplier = powernow_k6_get_cpu_multiplier();
    busfreq = cpu_khz / max_multiplier;
    /* table init */
    for (i=0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
        if (clock_ratio[i].index > max_multiplier)
            clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID;
        else
            clock_ratio[i].frequency = busfreq * clock_ratio[i].index;
    }
    /* cpuinfo and default policy values */
    policy->governor = 0; //!!!CPUFREQ_DEFAULT_GOVERNOR;
    policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
    policy->cur = busfreq * max_multiplier;
    return cpufreq_frequency_table_cpuinfo(policy, &clock_ratio[0]);
}
static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) |
{ |
unsigned int i; |
for (i=0; i<8; i++) { |
if (i==max_multiplier) |
powernow_k6_set_state(i); |
} |
return 0; |
} |
/* cpufreq driver descriptor wiring the callbacks above into the core. */
static struct cpufreq_driver powernow_k6_driver = {
    .verify = powernow_k6_verify,
    .target = powernow_k6_target,
    .init = powernow_k6_cpu_init,
    .exit = powernow_k6_cpu_exit,
    .name = "powernow-k6",
    .owner = THIS_MODULE,
};
/**
 * powernow_k6_init - initializes the k6 PowerNow! CPUFreq driver
 *
 * Initializes the K6 PowerNow! support. Returns -ENODEV on unsupported
 * devices (only AMD family 5, models 12/13 -- K6-2+/K6-3+ -- qualify),
 * -EIO if the I/O region is taken, -EINVAL on registration failure,
 * and zero on success.
 */
/*static*/ int __init powernow_k6_init(void)
{
    struct cpuinfo_x86 *c = &new_cpu_data;
    if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) ||
        ((c->x86_model != 12) && (c->x86_model != 13)))
        return -ENODEV;
    /* reserve the BVC I/O window before touching it */
    if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) {
        printk("cpufreq: PowerNow IOPORT region already used.\n");
        return -EIO;
    }
    if (cpufreq_register_driver(&powernow_k6_driver)) {
        release_region (POWERNOW_IOPORT, 16);
        return -EINVAL;
    }
    return 0;
}
/**
 * powernow_k6_exit - unregisters AMD K6-2+/3+ PowerNow! support
 *
 * Unregisters AMD K6-2+ / K6-3+ PowerNow! support and releases the
 * reserved I/O region.
 */
/*static*/ void __exit powernow_k6_exit(void)
{
    cpufreq_unregister_driver(&powernow_k6_driver);
    release_region (POWERNOW_IOPORT, 16);
}
/* Module metadata and entry points (inert in the S.Ha.R.K. static build). */
MODULE_AUTHOR ("Arjan van de Ven <arjanv@redhat.com>, Dave Jones <davej@codemonkey.org.uk>, Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION ("PowerNow! driver for AMD K6-2+ / K6-3+ processors.");
MODULE_LICENSE ("GPL");
module_init(powernow_k6_init);
module_exit(powernow_k6_exit);
/* ==== File: /shark/trunk/drivers/cpu/cpufreq/powernow-k7.c (new file, 431 lines) ==== */
/* |
* AMD K7 Powernow driver. |
* (C) 2003 Dave Jones <davej@codemonkey.org.uk> on behalf of SuSE Labs. |
* (C) 2003 Dave Jones <davej@redhat.com> |
* |
* Licensed under the terms of the GNU GPL License version 2. |
* Based upon datasheets & sample CPUs kindly provided by AMD. |
* |
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* |
* |
* Errata 5: Processor may fail to execute a FID/VID change in presence of interrupt. |
* - We cli/sti on stepping A0 CPUs around the FID/VID transition. |
* Errata 15: Processors with half frequency multipliers may hang upon wakeup from disconnect. |
* - We disable half multipliers if ACPI is used on A0 stepping CPUs. |
*/ |
#include <linuxcomp.h> |
#include <linux/config.h> |
#include <linux/kernel.h> |
#include <linux/module.h> |
#include <linux/init.h> |
#include <linux/cpufreq.h> |
#include <linux/slab.h> |
#include <linux/string.h> |
#include <asm/msr.h> |
#include <asm/timex.h> |
#include <asm/io.h> |
#include <asm/system.h> |
#include "powernow-k7.h" |
#define DEBUG |
#ifdef DEBUG |
#define dprintk(msg...) printk(msg) |
#else |
#define dprintk(msg...) do { } while(0) |
#endif |
#define PFX "powernow: " |
extern struct cpuinfo_x86 new_cpu_data; /* CPU info probed at boot (S.Ha.R.K.) */
/* PSB header as laid out by the BIOS in the 0xC0000-0xFFFF0 region
 * (see powernow_decode_bios); field order mirrors the BIOS table. */
struct psb_s {
    u8 signature[10];   /* "AMDK7PNOW!" */
    u8 tableversion;    /* only 0x12 (v1.2) is supported */
    u8 flags;           /* bit 0 clear = mobile voltage regulator */
    u16 settlingtime;   /* regulator settling time, microseconds */
    u8 reserved1;
    u8 numpst;          /* number of PST tables that follow */
};
/* One PST table header; followed by numpstates (fid,vid) byte pairs. */
struct pst_s {
    u32 cpuid;          /* extended-family/model/stepping tuple */
    u8 fsbspeed;        /* front-side bus speed, MHz */
    u8 maxfid;
    u8 startvid;
    u8 numpstates;
};
/* divide by 1000 to get VID. */
static int mobile_vid_table[32] = {
    2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650,
    1600, 1550, 1500, 1450, 1400, 1350, 1300, 0,
    1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100,
    1075, 1050, 1024, 1000, 975, 950, 925, 0,
};
/* divide by 10 to get FID. (-1 marks reserved codes) */
static int fid_codes[32] = {
    110, 115, 120, 125, 50, 55, 60, 65,
    70, 75, 80, 85, 90, 95, 100, 105,
    30, 190, 40, 200, 130, 135, 140, 210,
    150, 225, 160, 165, 170, 180, -1, -1,
};
static struct cpufreq_frequency_table *powernow_table; /* built by get_ranges() */
static unsigned int can_scale_bus;   /* CPU can scale frequency (cpuid 0x80000007) */
static unsigned int can_scale_vid;   /* CPU can scale voltage */
static unsigned int minimum_speed=-1; /* unsigned: starts at UINT_MAX */
static unsigned int maximum_speed;
static unsigned int number_scales;   /* p-state count of the current PST */
static unsigned int fsb;             /* front-side bus speed, MHz */
static unsigned int latency;         /* regulator settling time (SGTC units) */
static char have_a0;                 /* set on K7 660[A0]; errata 5/15 apply */
/*
 * Probe whether this CPU supports PowerNow!: must be an AMD K7
 * (family 6) advertising frequency and/or voltage scaling in
 * cpuid 0x80000007 EDX bits 1 and 2.  Returns 1 when usable, 0 when
 * not.  Also latches have_a0 for the A0-stepping errata workarounds.
 * Note the printk fragments deliberately build up a single line.
 */
static int check_powernow(void)
{
    struct cpuinfo_x86 *c = &new_cpu_data; /* Nino */
    unsigned int maxei, eax, ebx, ecx, edx;
    if (c->x86_vendor != X86_VENDOR_AMD) {
        dprintk (KERN_INFO PFX "AMD processor not detected.\n");
        return 0;
    }
    if (c->x86 !=6) {
        dprintk (KERN_INFO PFX "This module only works with AMD K7 CPUs\n");
        return 0;
    }
    printk (KERN_INFO PFX "AMD K7 CPU detected.\n");
    if ((c->x86_model == 6) && (c->x86_mask == 0)) {
        dprintk (KERN_INFO PFX "K7 660[A0] core detected, enabling errata workarounds\n");
        have_a0 = 1;
    }
    /* Get maximum capabilities */
    maxei = cpuid_eax (0x80000000);
    if (maxei < 0x80000007) { /* Any powernow info ? */
        printk (KERN_INFO PFX "No powernow capabilities detected\n");
        return 0;
    }
    cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
    printk (KERN_INFO PFX "PowerNOW! Technology present. Can scale: ");
    if (edx & 1 << 1) {
        printk ("frequency");
        can_scale_bus=1;
    }
    if ((edx & (1 << 1 | 1 << 2)) == 0x6) /* both bits -> joining text */
        printk (" and ");
    if (edx & 1 << 2) {
        printk ("voltage");
        can_scale_vid=1;
    }
    if (!(edx & (1 << 1 | 1 << 2))) {
        printk ("nothing.\n");
        return 0;
    }
    printk (".\n");
    return 1;
}
/*
 * Build powernow_table from a PST's (fid,vid) byte pairs.
 * @pst points at the first pair; number_scales and fsb must already be
 * set by the caller.  Packs fid into the low 8 bits and vid into the
 * high 8 bits of .index, tracks minimum_speed/maximum_speed (MHz), and
 * terminates the table with CPUFREQ_TABLE_END.
 * Returns 0 or -ENOMEM.  NOTE(review): reserved fid codes map to -1 in
 * fid_codes[], which would yield a bogus frequency -- presumably such
 * codes never appear in valid BIOS tables; confirm.
 */
static int get_ranges (unsigned char *pst)
{
    unsigned int j, speed;
    u8 fid, vid;
    powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) * (number_scales + 1)), GFP_KERNEL);
    if (!powernow_table)
        return -ENOMEM;
    memset(powernow_table, 0, (sizeof(struct cpufreq_frequency_table) * (number_scales + 1)));
    for (j=0 ; j < number_scales; j++) {
        fid = *pst++;
        powernow_table[j].frequency = fsb * fid_codes[fid] * 100;
        powernow_table[j].index = fid; /* lower 8 bits */
        speed = fsb * (fid_codes[fid]/10);
        if ((fid_codes[fid] % 10)==5) { /* half multiplier */
            speed += fsb/2;
#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
            /* errata 15: no half multipliers on A0 stepping with ACPI */
            if (have_a0 == 1)
                powernow_table[j].frequency = CPUFREQ_ENTRY_INVALID;
#endif
        }
        dprintk (KERN_INFO PFX " FID: 0x%x (%d.%dx [%dMHz])\t", fid,
            fid_codes[fid] / 10, fid_codes[fid] % 10, speed);
        if (speed < minimum_speed)
            minimum_speed = speed;
        if (speed > maximum_speed)
            maximum_speed = speed;
        vid = *pst++;
        powernow_table[j].index |= (vid << 8); /* upper 8 bits */
        dprintk ("VID: 0x%x (%d.%03dV)\n", vid, mobile_vid_table[vid]/1000,
            mobile_vid_table[vid]%1000);
    }
    dprintk ("\n");
    powernow_table[number_scales].frequency = CPUFREQ_TABLE_END;
    powernow_table[number_scales].index = 0;
    return 0;
}
/*
 * Program a new frequency ID via the FID/VID control MSR.
 * No-op when the requested FID is already current; otherwise arms a
 * FID-only transition (FIDC=1, VIDC=0) with SGTC set to the settling
 * latency.
 */
static void change_FID(int fid)
{
    union msr_fidvidctl fidvidctl;
    rdmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val);
    if (fidvidctl.bits.FID != fid) {
        fidvidctl.bits.SGTC = latency;
        fidvidctl.bits.FID = fid;
        fidvidctl.bits.VIDC = 0;
        fidvidctl.bits.FIDC = 1;
        wrmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val);
    }
}
/*
 * Program a new voltage ID via the FID/VID control MSR.
 * Mirror image of change_FID(): arms a VID-only transition
 * (VIDC=1, FIDC=0) when the requested VID differs from the current one.
 */
static void change_VID(int vid)
{
    union msr_fidvidctl fidvidctl;
    rdmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val);
    if (fidvidctl.bits.VID != vid) {
        fidvidctl.bits.SGTC = latency;
        fidvidctl.bits.VID = vid;
        fidvidctl.bits.FIDC = 0;
        fidvidctl.bits.VIDC = 1;
        wrmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val);
    }
}
/*
 * Transition to powernow_table[index]: unpack fid/vid from the packed
 * .index field and apply them in the safe order (FID first when going
 * down, VID first when going up).  On A0-stepping cores interrupts are
 * disabled around the transition (errata 5).  Transition notifications
 * are stubbed out in this S.Ha.R.K. port.
 */
static void change_speed (unsigned int index)
{
    u8 fid, vid;
    struct cpufreq_freqs freqs;
    union msr_fidvidstatus fidvidstatus;
    int cfid;
    /* fid are the lower 8 bits of the index we stored into
     * the cpufreq frequency table in powernow_decode_bios,
     * vid are the upper 8 bits.
     */
    fid = powernow_table[index].index & 0xFF;
    vid = (powernow_table[index].index & 0xFF00) >> 8;
    freqs.cpu = 0;
    rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val);
    cfid = fidvidstatus.bits.CFID; /* current FID, for freqs.old */
    freqs.old = fsb * fid_codes[cfid] * 100;
    freqs.new = powernow_table[index].frequency;
    //!!!cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
    /* Now do the magic poking into the MSRs. */
    if (have_a0 == 1) /* A0 errata 5 */
        local_irq_disable();
    if (freqs.old > freqs.new) {
        /* Going down, so change FID first */
        change_FID(fid);
        change_VID(vid);
    } else {
        /* Going up, so change VID first */
        change_VID(vid);
        change_FID(fid);
    }
    if (have_a0 == 1)
        local_irq_enable();
    //!!!cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
/* Scan the BIOS area (0xC0000-0xFFFF0) for the "AMDK7PNOW!" PSB header,
 * find the PST entry matching this CPU (extended cpuid tuple + maxfid +
 * startvid), and hand its frequency pairs to get_ranges().
 * Returns 0 on success, -ENODEV if no PSB found / unsupported version,
 * -EINVAL if a PSB exists but no PST matches this CPU. */
static int powernow_decode_bios (int maxfid, int startvid)
{
	struct psb_s *psb;
	struct pst_s *pst;
	struct cpuinfo_x86 *c = &new_cpu_data; /* Nino */
	unsigned int i, j;
	unsigned char *p;
	unsigned int etuple;
	unsigned int ret;
	/* build the match tuple: extended family bits + model + stepping */
	etuple = cpuid_eax(0x80000001);
	etuple &= 0xf00;
	etuple |= (c->x86_model<<4)|(c->x86_mask);
	/* NOTE: `i` doubles as the BIOS scan address here and as the PST
	 * index inside; that is safe only because every path inside the
	 * signature match returns before the outer loop continues. */
	for (i=0xC0000; i < 0xffff0 ; i+=16) {
		p = phys_to_virt(i);
		if (memcmp(p, "AMDK7PNOW!", 10) == 0){
			dprintk (KERN_INFO PFX "Found PSB header at %p\n", p);
			psb = (struct psb_s *) p;
			dprintk (KERN_INFO PFX "Table version: 0x%x\n", psb->tableversion);
			if (psb->tableversion != 0x12) {
				printk (KERN_INFO PFX "Sorry, only v1.2 tables supported right now\n");
				return -ENODEV;
			}
			dprintk (KERN_INFO PFX "Flags: 0x%x (", psb->flags);
			if ((psb->flags & 1)==0) {
				dprintk ("Mobile");
			} else {
				dprintk ("Desktop");
			}
			dprintk (" voltage regulator)\n");
			latency = psb->settlingtime;
			if (latency < 100) {
				printk (KERN_INFO PFX "BIOS set settling time to %d microseconds."
						"Should be at least 100. Correcting.\n", latency);
				latency = 100;
			}
			dprintk (KERN_INFO PFX "Settling Time: %d microseconds.\n", psb->settlingtime);
			dprintk (KERN_INFO PFX "Has %d PST tables. (Only dumping ones relevant to this CPU).\n", psb->numpst);
			latency *= 100;	/* SGTC needs to be in units of 10ns */
			p += sizeof (struct psb_s);
			pst = (struct pst_s *) p;
			/* walk the PSTs that follow the PSB header */
			for (i = 0 ; i <psb->numpst; i++) {
				pst = (struct pst_s *) p;
				number_scales = pst->numpstates;
				if ((etuple == pst->cpuid) && (maxfid==pst->maxfid) && (startvid==pst->startvid))
				{
					dprintk (KERN_INFO PFX "PST:%d (@%p)\n", i, pst);
					dprintk (KERN_INFO PFX " cpuid: 0x%x\t", pst->cpuid);
					dprintk ("fsb: %d\t", pst->fsbspeed);
					dprintk ("maxFID: 0x%x\t", pst->maxfid);
					dprintk ("startvid: 0x%x\n", pst->startvid);
					fsb = pst->fsbspeed;
					ret = get_ranges ((char *) pst + sizeof (struct pst_s));
					return ret;
				} else {
					/* skip this PST: 2 bytes per p-state (fid/vid pair) */
					p = (char *) pst + sizeof (struct pst_s);
					for (j=0 ; j < number_scales; j++)
						p+=2;
				}
			}
			printk (KERN_INFO PFX "No PST tables match this cpuid (0x%x)\n", etuple);
			printk ("This is indicative of a broken BIOS. Email davej@redhat.com\n");
			return -EINVAL;
		}
		/* no effect on the next iteration: p is recomputed from i above */
		p++;
	}
	return -ENODEV;
}
static int powernow_target (struct cpufreq_policy *policy, |
unsigned int target_freq, |
unsigned int relation) |
{ |
unsigned int newstate; |
if (cpufreq_frequency_table_target(policy, powernow_table, target_freq, relation, &newstate)) |
return -EINVAL; |
change_speed(newstate); |
return 0; |
} |
/* cpufreq `verify` callback: clamp the policy's min/max to frequencies
 * actually present in powernow_table. */
static int powernow_verify (struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, powernow_table);
}
/* cpufreq per-policy init.  Only CPU 0 is supported.  Reads the max fid
 * and startup vid from the status MSR, decodes the BIOS PSB/PST into
 * powernow_table, and registers that table with the cpufreq core. */
static int __init powernow_cpu_init (struct cpufreq_policy *policy)
{
	union msr_fidvidstatus fidvidstatus;
	int result;
	if (policy->cpu != 0)
		return -ENODEV;
	rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val);
	/* MFID/SVID from the MSR select the matching PST in the BIOS */
	result = powernow_decode_bios(fidvidstatus.bits.MFID, fidvidstatus.bits.SVID);
	if (result)
		return result;
	printk (KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n",
				minimum_speed, maximum_speed);
	policy->governor = 0; //!!!CPUFREQ_DEFAULT_GOVERNOR;
	policy->cpuinfo.transition_latency = latency;
	/* assume we boot at the maximum speed -- TODO confirm against MSR */
	policy->cur = maximum_speed;
	return cpufreq_frequency_table_cpuinfo(policy, powernow_table);
}
/* cpufreq driver operations for PowerNow! on AMD K7. */
static struct cpufreq_driver powernow_driver = {
	.verify = powernow_verify, /* clamp policy to table limits */
	.target = powernow_target, /* switch to a requested frequency */
	.init = powernow_cpu_init, /* per-cpu setup (cpu 0 only) */
	.name = "powernow-k7",
	.owner = THIS_MODULE,
};
/* Module entry point: register the driver if this CPU supports
 * PowerNow!.  (The original upstream DMI blacklist check is disabled
 * in this port.) */
/*static*/ int __init powernow_init (void)
{
	if (!check_powernow())
		return -ENODEV;

	return cpufreq_register_driver(&powernow_driver);
}
/* Module teardown: unregister the driver and release the frequency
 * table allocated by powernow_decode_bios()/get_ranges(). */
/*static*/ void __exit powernow_exit (void)
{
	cpufreq_unregister_driver(&powernow_driver);
	/* kfree(NULL) is a no-op, so the pointer needs no NULL test */
	kfree(powernow_table);
	powernow_table = NULL;	/* guard against use-after-free on re-entry */
}
MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>"); |
MODULE_DESCRIPTION ("Powernow driver for AMD K7 processors."); |
MODULE_LICENSE ("GPL"); |
module_init(powernow_init); |
module_exit(powernow_exit); |
/shark/trunk/drivers/cpu/cpufreq/speedstep-lib.c |
---|
0,0 → 1,277 |
/* |
* (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> |
* |
* Licensed under the terms of the GNU GPL License version 2. |
* |
* Library for common functions for Intel SpeedStep v.1 and v.2 support |
* |
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* |
*/ |
#include <linuxcomp.h> |
#include <linux/kernel.h> |
#include <linux/module.h> |
#include <linux/init.h> |
#include <linux/cpufreq.h> |
#include <linux/pci.h> |
#include <linux/slab.h> |
#include <asm/msr.h> |
#include "speedstep-lib.h" |
/* DEBUG |
* Define it if you want verbose debug output, e.g. for bug reporting |
*/ |
#define SPEEDSTEP_DEBUG |
#ifdef SPEEDSTEP_DEBUG |
#define dprintk(msg...) printk(msg) |
#else |
#define dprintk(msg...) do { } while(0) |
#endif |
/********************************************************************* |
* GET PROCESSOR CORE SPEED IN KHZ * |
*********************************************************************/ |
static unsigned int pentium3_get_frequency (unsigned int processor) |
{ |
/* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ |
struct { |
unsigned int ratio; /* Frequency Multiplier (x10) */ |
u8 bitmap; /* power on configuration bits |
[27, 25:22] (in MSR 0x2a) */ |
} msr_decode_mult [] = { |
{ 30, 0x01 }, |
{ 35, 0x05 }, |
{ 40, 0x02 }, |
{ 45, 0x06 }, |
{ 50, 0x00 }, |
{ 55, 0x04 }, |
{ 60, 0x0b }, |
{ 65, 0x0f }, |
{ 70, 0x09 }, |
{ 75, 0x0d }, |
{ 80, 0x0a }, |
{ 85, 0x26 }, |
{ 90, 0x20 }, |
{ 100, 0x2b }, |
{ 0, 0xff } /* error or unknown value */ |
}; |
/* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */ |
struct { |
unsigned int value; /* Front Side Bus speed in MHz */ |
u8 bitmap; /* power on configuration bits [18: 19] |
(in MSR 0x2a) */ |
} msr_decode_fsb [] = { |
{ 66, 0x0 }, |
{ 100, 0x2 }, |
{ 133, 0x1 }, |
{ 0, 0xff} |
}; |
u32 msr_lo, msr_tmp; |
int i = 0, j = 0; |
/* read MSR 0x2a - we only need the low 32 bits */ |
rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); |
dprintk(KERN_DEBUG "speedstep-lib: P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); |
msr_tmp = msr_lo; |
/* decode the FSB */ |
msr_tmp &= 0x00c0000; |
msr_tmp >>= 18; |
while (msr_tmp != msr_decode_fsb[i].bitmap) { |
if (msr_decode_fsb[i].bitmap == 0xff) |
return 0; |
i++; |
} |
/* decode the multiplier */ |
if (processor == SPEEDSTEP_PROCESSOR_PIII_C_EARLY) |
msr_lo &= 0x03c00000; |
else |
msr_lo &= 0x0bc00000; |
msr_lo >>= 22; |
while (msr_lo != msr_decode_mult[j].bitmap) { |
if (msr_decode_mult[j].bitmap == 0xff) |
return 0; |
j++; |
} |
return (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100); |
} |
static unsigned int pentium4_get_frequency(void) |
{ |
u32 msr_lo, msr_hi; |
rdmsr(0x2c, msr_lo, msr_hi); |
dprintk(KERN_DEBUG "speedstep-lib: P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); |
msr_lo >>= 24; |
return (msr_lo * 100000); |
} |
unsigned int speedstep_get_processor_frequency(unsigned int processor) |
{ |
switch (processor) { |
case SPEEDSTEP_PROCESSOR_P4M: |
return pentium4_get_frequency(); |
case SPEEDSTEP_PROCESSOR_PIII_T: |
case SPEEDSTEP_PROCESSOR_PIII_C: |
case SPEEDSTEP_PROCESSOR_PIII_C_EARLY: |
return pentium3_get_frequency(processor); |
default: |
return 0; |
}; |
return 0; |
} |
EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency); |
/********************************************************************* |
* DETECT SPEEDSTEP-CAPABLE PROCESSOR * |
*********************************************************************/ |
/* Identify a SpeedStep-capable Intel processor from CPUID family/model/
 * stepping and (for Coppermine) platform MSRs.  Returns one of the
 * SPEEDSTEP_PROCESSOR_* codes, or 0 if not supported. */
unsigned int speedstep_detect_processor (void)
{
	struct cpuinfo_x86 *c = cpu_data;
	u32 ebx, msr_lo, msr_hi;
	/* only Intel family 6 (P6) and family 0xF (P4) are candidates */
	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
	    ((c->x86 != 6) && (c->x86 != 0xF)))
		return 0;
	if (c->x86 == 0xF) {
		/* Intel Mobile Pentium 4-M
		 * or Intel Mobile Pentium 4 with 533 MHz FSB */
		if (c->x86_model != 2)
			return 0;
		if ((c->x86_mask != 4) &&	/* B-stepping [M-P4-M] */
		    (c->x86_mask != 7) &&	/* C-stepping [M-P4-M] */
		    (c->x86_mask != 9))	/* D-stepping [M-P4-M or M-P4/533] */
			return 0;
		/* brand index in the low byte of CPUID(1).EBX must flag a
		 * mobile part (0x0e / 0x0f) */
		ebx = cpuid_ebx(0x00000001);
		ebx &= 0x000000FF;
		if ((ebx != 0x0e) && (ebx != 0x0f))
			return 0;
		return SPEEDSTEP_PROCESSOR_P4M;
	}
	switch (c->x86_model) {
	case 0x0B: /* Intel PIII [Tualatin] */
		/* cpuid_ebx(1) is 0x04 for desktop PIII,
		   0x06 for mobile PIII-M */
		ebx = cpuid_ebx(0x00000001);
		ebx &= 0x000000FF;
		if (ebx != 0x06)
			return 0;
		/* So far all PIII-M processors support SpeedStep. See
		 * Intel's 24540640.pdf of June 2003
		 */
		return SPEEDSTEP_PROCESSOR_PIII_T;
	case 0x08: /* Intel PIII [Coppermine] */
		/* all mobile PIII Coppermines have FSB 100 MHz
		 * ==> sort out a few desktop PIIIs. */
		rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi);
		dprintk(KERN_DEBUG "cpufreq: Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", msr_lo, msr_hi);
		msr_lo &= 0x00c0000;
		if (msr_lo != 0x0080000)
			return 0;
		/*
		 * If the processor is a mobile version,
		 * platform ID has bit 50 set
		 * it has SpeedStep technology if either
		 * bit 56 or 57 is set
		 * (bits 18 / 24-25 of the high 32-bit word read below)
		 */
		rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi);
		dprintk(KERN_DEBUG "cpufreq: Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", msr_lo, msr_hi);
		if ((msr_hi & (1<<18)) && (msr_hi & (3<<24))) {
			if (c->x86_mask == 0x01)
				return SPEEDSTEP_PROCESSOR_PIII_C_EARLY;
			else
				return SPEEDSTEP_PROCESSOR_PIII_C;
		}
		/* intentional fallthrough to default when the MSR checks fail */
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(speedstep_detect_processor); |
/********************************************************************* |
* DETECT SPEEDSTEP SPEEDS * |
*********************************************************************/ |
/* Measure the two SpeedStep operating points by actually switching
 * states: record the current speed, switch low and read, switch high
 * and read, then restore the previous state.  Runs with interrupts off.
 * Returns 0 on success or a negative errno.
 * NOTE(review): the return type is unsigned int but error paths return
 * negative errno values; callers must interpret the result as signed. */
unsigned int speedstep_get_freqs(unsigned int processor,
				  unsigned int *low_speed,
				  unsigned int *high_speed,
				  void (*set_state) (unsigned int state,
						     unsigned int notify)
    )
{
	unsigned int prev_speed;
	unsigned int ret = 0;
	unsigned long flags;
	if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
		return -EINVAL;
	/* get current speed */
	prev_speed = speedstep_get_processor_frequency(processor);
	if (!prev_speed)
		return -EIO;
	local_irq_save(flags);
	/* switch to low state */
	set_state(SPEEDSTEP_LOW, 0);
	*low_speed = speedstep_get_processor_frequency(processor);
	if (!*low_speed) {
		ret = -EIO;
		goto out;
	}
	/* switch to high state */
	set_state(SPEEDSTEP_HIGH, 0);
	*high_speed = speedstep_get_processor_frequency(processor);
	if (!*high_speed) {
		ret = -EIO;
		goto out;
	}
	/* identical readings mean SpeedStep is not actually working */
	if (*low_speed == *high_speed) {
		ret = -ENODEV;
		goto out;
	}
	/* switch to previous state, if necessary */
	if (*high_speed != prev_speed)
		set_state(SPEEDSTEP_LOW, 0);
 out:
	local_irq_restore(flags);
	return (ret);
}
EXPORT_SYMBOL_GPL(speedstep_get_freqs); |
MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>"); |
MODULE_DESCRIPTION ("Library for Intel SpeedStep 1 or 2 cpufreq drivers."); |
MODULE_LICENSE ("GPL"); |
/shark/trunk/drivers/cpu/cpufreq/powernow-k8.c |
---|
0,0 → 1,999 |
/* |
* (c) 2003 Advanced Micro Devices, Inc. |
* Your use of this code is subject to the terms and conditions of the |
* GNU general public license version 2. See "../../../COPYING" or |
* http://www.gnu.org/licenses/gpl.html |
* |
* Support : paul.devriendt@amd.com |
* |
* Based on the powernow-k7.c module written by Dave Jones. |
* (C) 2003 Dave Jones <davej@codemonkey.ork.uk> on behalf of SuSE Labs |
* Licensed under the terms of the GNU GPL License version 2. |
* Based upon datasheets & sample CPUs kindly provided by AMD. |
* |
* Processor information obtained from Chapter 9 (Power and Thermal Management) |
* of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD |
* Opteron Processors", revision 3.03, available for download from www.amd.com |
* |
*/ |
#include <linuxcomp.h> |
#include <linux/kernel.h> |
#include <linux/smp.h> |
#include <linux/module.h> |
#include <linux/init.h> |
#include <linux/cpufreq.h> |
#include <linux/slab.h> |
#include <linux/string.h> |
#include <asm/msr.h> |
#include <asm/io.h> |
#include <asm/delay.h> |
#define PFX "powernow-k8: " |
#define BFX PFX "BIOS error: " |
#define VERSION "version 1.00.08 - September 26, 2003" |
#include "powernow-k8.h" |
#ifdef CONFIG_PREEMPT |
#warning this driver has not been tested on a preempt system |
#endif |
extern struct cpuinfo_x86 new_cpu_data;

/* Module state, populated by find_psb_table() from the BIOS PSB table. */
static u32 vstable;	/* voltage stabilization time, from PSB, units 20 us */
static u32 plllock;	/* pll lock time, from PSB, units 1 us */
static u32 numps;	/* number of p-states, from PSB */
static u32 rvo;	/* ramp voltage offset, from PSB */
static u32 irt;	/* isochronous relief time, from PSB */
static u32 vidmvs;	/* usable value calculated from mvs, from PSB */
struct pst_s *ppst;	/* array of p states, valid for this part */
static u32 currvid;	/* keep track of the current fid / vid */
static u32 currfid;
/* |
The PSB table supplied by BIOS allows for the definition of the number of |
p-states that can be used when running on a/c, and the number of p-states |
that can be used when running on battery. This allows laptop manufacturers |
to force the system to save power when running from battery. The relationship |
is : |
1 <= number_of_battery_p_states <= maximum_number_of_p_states |
This driver does NOT have the support in it to detect transitions from |
a/c power to battery power, and thus trigger the transition to a lower |
p-state if required. This is because I need ACPI and the 2.6 kernel to do |
this, and this is a 2.4 kernel driver. Check back for a new improved driver |
for the 2.6 kernel soon. |
This code therefore assumes it is on battery at all times, and thus |
restricts performance to number_of_battery_p_states. For desktops, |
number_of_battery_p_states == maximum_number_of_pstates, |
so this is not actually a restriction. |
*/ |
static u32 batps; /* limit on the number of p states when on battery */ |
/* - set by BIOS in the PSB/PST */ |
/* cpufreq driver operations for PowerNow! on AMD K8 (Athlon 64/Opteron). */
static struct cpufreq_driver cpufreq_amd64_driver = {
	.verify = powernowk8_verify, /* validate policy frequency limits */
	.target = powernowk8_target, /* transition to a requested frequency */
	.init = powernowk8_cpu_init, /* per-cpu setup */
	.name = "cpufreq-amd64",
	.owner = THIS_MODULE,
};
#define SEARCH_UP 1 |
#define SEARCH_DOWN 0 |
/* Return a frequency in MHz, given an input fid.
 * fid 0 is 800 MHz; each fid step adds 100 MHz. */
u32 find_freq_from_fid(u32 fid)
{
	return (fid * 100) + 800;
}
/* Return a fid matching an input frequency in MHz.
 * Exact inverse of find_freq_from_fid() for multiples of 100 >= 800. */
static u32 find_fid_from_freq(u32 freq)
{
	u32 mhz_above_base = freq - 800;

	return mhz_above_base / 100;
}
/* Return the vco fid for an input fid */ |
static u32 convert_fid_to_vco_fid(u32 fid) |
{ |
if (fid < HI_FID_TABLE_BOTTOM) { |
return 8 + (2 * fid); |
} else { |
return fid; |
} |
} |
/* Sort the fid/vid frequency table into ascending order by fid. The spec */ |
/* implies that it will be sorted by BIOS, but, it only implies it, and I */ |
/* prefer not to trust when I can check. */ |
/* Yes, it is a simple bubble sort, but the PST is really small, so the */ |
/* choice of algorithm is pretty irrelevant. */ |
static inline void sort_pst(struct pst_s *ppst, u32 numpstates) |
{ |
u32 i; |
u8 tempfid; |
u8 tempvid; |
int swaps = 1; |
while (swaps) { |
swaps = 0; |
for (i = 0; i < (numpstates - 1); i++) { |
if (ppst[i].fid > ppst[i + 1].fid) { |
swaps = 1; |
tempfid = ppst[i].fid; |
tempvid = ppst[i].vid; |
ppst[i].fid = ppst[i + 1].fid; |
ppst[i].vid = ppst[i + 1].vid; |
ppst[i + 1].fid = tempfid; |
ppst[i + 1].vid = tempvid; |
} |
} |
} |
return; |
} |
/* Return 1 if the pending bit is set. Unless we are actually just told the */ |
/* processor to transition a state, seeing this bit set is really bad news. */ |
static inline int pending_bit_stuck(void) |
{ |
u32 lo; |
u32 hi; |
rdmsr(MSR_FIDVID_STATUS, lo, hi); |
return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0; |
} |
/* Update the global current fid / vid values from the status msr. Returns 1 */ |
/* on error. */ |
static int query_current_values_with_pending_wait(void) |
{ |
u32 lo; |
u32 hi; |
u32 i = 0; |
lo = MSR_S_LO_CHANGE_PENDING; |
while (lo & MSR_S_LO_CHANGE_PENDING) { |
if (i++ > 0x1000000) { |
printk(KERN_ERR PFX "detected change pending stuck\n"); |
return 1; |
} |
rdmsr(MSR_FIDVID_STATUS, lo, hi); |
} |
currvid = hi & MSR_S_HI_CURRENT_VID; |
currfid = lo & MSR_S_LO_CURRENT_FID; |
return 0; |
} |
/* the isochronous relief time */ |
static inline void count_off_irt(void) |
{ |
udelay((1 << irt) * 10); |
return; |
} |
/* the voltage stabalization time */ |
static inline void count_off_vst(void) |
{ |
udelay(vstable * VST_UNITS_20US); |
return; |
} |
/* Write the new fid value along with the other control fields to the
 * control MSR (keeping the current vid), wait for completion, and
 * verify that only the fid changed.  Returns 1 on any failure. */
static int write_new_fid(u32 fid)
{
	u32 lo;
	u32 savevid = currvid;
	/* sanity-check that both values fit their MSR fields */
	if ((fid & INVALID_FID_MASK) || (currvid & INVALID_VID_MASK)) {
		printk(KERN_ERR PFX "internal error - overflow on fid write\n");
		return 1;
	}
	lo = fid | (currvid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID;
	dprintk(KERN_DEBUG PFX "writing fid %x, lo %x, hi %x\n",
		fid, lo, plllock * PLL_LOCK_CONVERSION);
	/* high word carries the pll lock time for this transition */
	wrmsr(MSR_FIDVID_CTL, lo, plllock * PLL_LOCK_CONVERSION);
	if (query_current_values_with_pending_wait())
		return 1;
	count_off_irt();	/* honor the isochronous relief time */
	/* the vid must not have moved during a fid-only transition */
	if (savevid != currvid) {
		printk(KERN_ERR PFX
		       "vid changed on fid transition, save %x, currvid %x\n",
		       savevid, currvid);
		return 1;
	}
	if (fid != currfid) {
		printk(KERN_ERR PFX
		       "fid transition failed, fid %x, currfid %x\n",
		       fid, currfid);
		return 1;
	}
	return 0;
}
/* Write a new vid to the hardware (keeping the current fid), wait for
 * completion, and verify that only the vid changed.  Returns 1 on any
 * failure. */
static int write_new_vid(u32 vid)
{
	u32 lo;
	u32 savefid = currfid;
	/* sanity-check that both values fit their MSR fields */
	if ((currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
		printk(KERN_ERR PFX "internal error - overflow on vid write\n");
		return 1;
	}
	lo = currfid | (vid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID;
	dprintk(KERN_DEBUG PFX "writing vid %x, lo %x, hi %x\n",
		vid, lo, STOP_GRANT_5NS);
	/* high word carries the stop-grant timeout for this transition */
	wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
	if (query_current_values_with_pending_wait()) {
		return 1;
	}
	/* the fid must not have moved during a vid-only transition */
	if (savefid != currfid) {
		printk(KERN_ERR PFX
		       "fid changed on vid transition, save %x currfid %x\n",
		       savefid, currfid);
		return 1;
	}
	if (vid != currvid) {
		printk(KERN_ERR PFX
		       "vid transition failed, vid %x, currvid %x\n",
		       vid, currvid);
		return 1;
	}
	return 0;
}
/* Reduce the vid by the max of step or reqvid. */
/* Decreasing vid codes represent increasing voltages : */
/* vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of 0x1f is off. */
/* NOTE(review): assumes currvid >= reqvid on entry (all callers satisfy
 * this); since both are u32, reqvid > currvid would underflow. */
static int decrease_vid_code_by_step(u32 reqvid, u32 step)
{
	if ((currvid - reqvid) > step)
		/* more than one step away: move only one step this call */
		reqvid = currvid - step;
	if (write_new_vid(reqvid))
		return 1;
	count_off_vst();	/* let the voltage stabilize */
	return 0;
}
/* Change the fid and vid, by the 3 phases: adjust voltage for the
 * transition (ph1), step the frequency (ph2), settle on the final
 * voltage (ph3), then verify the hardware landed where requested.
 * Returns 1 on any failure. */
static inline int transition_fid_vid(u32 reqfid, u32 reqvid)
{
	if (core_voltage_pre_transition(reqvid))
		return 1;
	if (core_frequency_transition(reqfid))
		return 1;
	if (core_voltage_post_transition(reqvid))
		return 1;
	if (query_current_values_with_pending_wait())
		return 1;
	/* final cross-check against the status MSR values */
	if ((reqfid != currfid) || (reqvid != currvid)) {
		printk(KERN_ERR PFX "failed: req 0x%x 0x%x, curr 0x%x 0x%x\n",
		       reqfid, reqvid, currfid, currvid);
		return 1;
	}
	dprintk(KERN_INFO PFX
		"transitioned: new fid 0x%x, vid 0x%x\n", currfid, currvid);
	return 0;
}
/* Phase 1 - core voltage transition ... setup appropriate voltage for
 * the fid transition: step the vid down to reqvid, then apply `rvo`
 * additional single-step reductions (ramp voltage offset).  The fid
 * must not change during this phase.  Returns 1 on failure. */
static inline int core_voltage_pre_transition(u32 reqvid)
{
	u32 rvosteps = rvo;
	u32 savefid = currfid;
	dprintk(KERN_DEBUG PFX
		"ph1: start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo %x\n",
		currfid, currvid, reqvid, rvo);
	/* walk the vid down to the requested value, one mvs step at a time */
	while (currvid > reqvid) {
		dprintk(KERN_DEBUG PFX "ph1: curr 0x%x, requesting vid 0x%x\n",
			currvid, reqvid);
		if (decrease_vid_code_by_step(reqvid, vidmvs))
			return 1;
	}
	/* apply the ramp voltage offset, stopping at vid code 0 */
	while (rvosteps > 0) {
		if (currvid == 0) {
			rvosteps = 0;
		} else {
			dprintk(KERN_DEBUG PFX
				"ph1: changing vid for rvo, requesting 0x%x\n",
				currvid - 1);
			if (decrease_vid_code_by_step(currvid - 1, 1))
				return 1;
			rvosteps--;
		}
	}
	if (query_current_values_with_pending_wait())
		return 1;
	if (savefid != currfid) {
		printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n", currfid);
		return 1;
	}
	dprintk(KERN_DEBUG PFX "ph1 complete, currfid 0x%x, currvid 0x%x\n",
		currfid, currvid);
	return 0;
}
/* Phase 2 - core frequency transition.  Steps the fid toward reqfid,
 * keeping each intermediate VCO-fid jump within 2, then issues the
 * final fid and verifies the vid did not move.  Returns 1 on error. */
static inline int core_frequency_transition(u32 reqfid)
{
	u32 vcoreqfid;
	u32 vcocurrfid;
	u32 vcofiddiff;
	u32 savevid = currvid;
	/* transitions within the low fid table are rejected */
	if ((reqfid < HI_FID_TABLE_BOTTOM) && (currfid < HI_FID_TABLE_BOTTOM)) {
		printk(KERN_ERR PFX "ph2 illegal lo-lo transition 0x%x 0x%x\n",
		       reqfid, currfid);
		return 1;
	}
	if (currfid == reqfid) {
		printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n", currfid);
		return 0;
	}
	dprintk(KERN_DEBUG PFX
		"ph2 starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n",
		currfid, currvid, reqfid);
	vcoreqfid = convert_fid_to_vco_fid(reqfid);
	vcocurrfid = convert_fid_to_vco_fid(currfid);
	vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
	    : vcoreqfid - vcocurrfid;
	/* close the gap in VCO-fid steps of at most 2 */
	while (vcofiddiff > 2) {
		if (reqfid > currfid) {
			if (currfid > LO_FID_TABLE_TOP) {
				if (write_new_fid(currfid + 2)) {
					return 1;
				}
			} else {
				/* still in the low table: step on the VCO scale */
				if (write_new_fid
				    (2 + convert_fid_to_vco_fid(currfid))) {
					return 1;
				}
			}
		} else {
			if (write_new_fid(currfid - 2))
				return 1;
		}
		vcocurrfid = convert_fid_to_vco_fid(currfid);
		vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
		    : vcoreqfid - vcocurrfid;
	}
	/* final hop to the requested fid */
	if (write_new_fid(reqfid))
		return 1;
	if (query_current_values_with_pending_wait())
		return 1;
	if (currfid != reqfid) {
		printk(KERN_ERR PFX
		       "ph2 mismatch, failed fid transition, curr %x, req %x\n",
		       currfid, reqfid);
		return 1;
	}
	/* the vid must be untouched by a frequency-only phase */
	if (savevid != currvid) {
		printk(KERN_ERR PFX
		       "ph2 vid changed, save %x, curr %x\n", savevid,
		       currvid);
		return 1;
	}
	dprintk(KERN_DEBUG PFX "ph2 complete, currfid 0x%x, currvid 0x%x\n",
		currfid, currvid);
	return 0;
}
/* Phase 3 - core voltage transition flow ... jump to the final vid and
 * verify that neither the fid nor the vid deviated.  Returns 1 on
 * failure. */
static inline int core_voltage_post_transition(u32 reqvid)
{
	u32 savefid = currfid;
	u32 savereqvid = reqvid;
	dprintk(KERN_DEBUG PFX "ph3 starting, currfid 0x%x, currvid 0x%x\n",
		currfid, currvid);
	if (reqvid != currvid) {
		if (write_new_vid(reqvid))
			return 1;
		/* the fid must not move during a vid-only write */
		if (savefid != currfid) {
			printk(KERN_ERR PFX
			       "ph3: bad fid change, save %x, curr %x\n",
			       savefid, currfid);
			return 1;
		}
		if (currvid != reqvid) {
			printk(KERN_ERR PFX
			       "ph3: failed vid transition\n, req %x, curr %x",
			       reqvid, currvid);
			return 1;
		}
	}
	if (query_current_values_with_pending_wait())
		return 1;
	/* re-verify against freshly read MSR values */
	if (savereqvid != currvid) {
		dprintk(KERN_ERR PFX "ph3 failed, currvid 0x%x\n", currvid);
		return 1;
	}
	if (savefid != currfid) {
		dprintk(KERN_ERR PFX "ph3 failed, currfid changed 0x%x\n",
			currfid);
		return 1;
	}
	dprintk(KERN_DEBUG PFX "ph3 complete, currfid 0x%x, currvid 0x%x\n",
		currfid, currvid);
	return 0;
}
static inline int check_supported_cpu(void) |
{ |
struct cpuinfo_x86 *c = &new_cpu_data; |
u32 eax, ebx, ecx, edx; |
if (num_online_cpus() != 1) { |
dprintk(KERN_INFO PFX "multiprocessor systems not supported\n"); |
return 0; |
} |
if (c->x86_vendor != X86_VENDOR_AMD) { |
dprintk(KERN_INFO PFX "Not an AMD processor\n"); |
return 0; |
} |
eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); |
if ((eax & CPUID_XFAM_MOD) == ATHLON64_XFAM_MOD) { |
dprintk(KERN_DEBUG PFX "AMD Althon 64 Processor found\n"); |
if ((eax & CPUID_F1_STEP) < ATHLON64_REV_C0) { |
dprintk(KERN_INFO PFX "Revision C0 or better " |
"AMD Athlon 64 processor required\n"); |
return 0; |
} |
} else if ((eax & CPUID_XFAM_MOD) == OPTERON_XFAM_MOD) { |
dprintk(KERN_DEBUG PFX "AMD Opteron Processor found\n"); |
} else { |
dprintk(KERN_INFO PFX |
"AMD Athlon 64 or AMD Opteron processor required\n"); |
return 0; |
} |
eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES); |
if (eax < CPUID_FREQ_VOLT_CAPABILITIES) { |
dprintk(KERN_INFO PFX |
"No frequency change capabilities detected\n"); |
return 0; |
} |
cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); |
if ((edx & P_STATE_TRANSITION_CAPABLE) != P_STATE_TRANSITION_CAPABLE) { |
dprintk(KERN_INFO PFX "Power state transitions not supported\n"); |
return 0; |
} |
printk(KERN_INFO PFX "Found AMD Athlon 64 / Opteron processor " |
"supporting p-state transitions\n"); |
return 1; |
} |
/* Find and validate the PSB/PST table in BIOS. */ |
static inline int find_psb_table(void) |
{ |
struct psb_s *psb; |
struct pst_s *pst; |
unsigned i, j; |
u32 lastfid; |
u32 mvs; |
u8 maxvid; |
for (i = 0xc0000; i < 0xffff0; i += 0x10) { |
/* Scan BIOS looking for the signature. */ |
/* It can not be at ffff0 - it is too big. */ |
psb = phys_to_virt(i); |
if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0) |
continue; |
dprintk(KERN_DEBUG PFX "found PSB header at 0x%p\n", psb); |
dprintk(KERN_DEBUG PFX "table vers: 0x%x\n", psb->tableversion); |
if (psb->tableversion != PSB_VERSION_1_4) { |
printk(KERN_INFO BFX "PSB table is not v1.4\n"); |
return -ENODEV; |
} |
dprintk(KERN_DEBUG PFX "flags: 0x%x\n", psb->flags1); |
if (psb->flags1) { |
printk(KERN_ERR BFX "unknown flags\n"); |
return -ENODEV; |
} |
vstable = psb->voltagestabilizationtime; |
printk(KERN_INFO PFX "voltage stable time: %d (units 20us)\n", |
vstable); |
dprintk(KERN_DEBUG PFX "flags2: 0x%x\n", psb->flags2); |
rvo = psb->flags2 & 3; |
irt = ((psb->flags2) >> 2) & 3; |
mvs = ((psb->flags2) >> 4) & 3; |
vidmvs = 1 << mvs; |
batps = ((psb->flags2) >> 6) & 3; |
printk(KERN_INFO PFX "p states on battery: %d ", batps); |
switch (batps) { |
case 0: |
printk("- all available\n"); |
break; |
case 1: |
printk("- only the minimum\n"); |
break; |
case 2: |
printk("- only the 2 lowest\n"); |
break; |
case 3: |
printk("- only the 3 lowest\n"); |
break; |
} |
printk(KERN_INFO PFX "ramp voltage offset: %d\n", rvo); |
printk(KERN_INFO PFX "isochronous relief time: %d\n", irt); |
printk(KERN_INFO PFX "maximum voltage step: %d\n", mvs); |
dprintk(KERN_DEBUG PFX "numpst: 0x%x\n", psb->numpst); |
if (psb->numpst != 1) { |
printk(KERN_ERR BFX "numpst must be 1\n"); |
return -ENODEV; |
} |
dprintk(KERN_DEBUG PFX "cpuid: 0x%x\n", psb->cpuid); |
plllock = psb->plllocktime; |
printk(KERN_INFO PFX "pll lock time: 0x%x\n", plllock); |
maxvid = psb->maxvid; |
printk(KERN_INFO PFX "maxfid: 0x%x\n", psb->maxfid); |
printk(KERN_INFO PFX "maxvid: 0x%x\n", maxvid); |
numps = psb->numpstates; |
printk(KERN_INFO PFX "numpstates: 0x%x\n", numps); |
if (numps < 2) { |
printk(KERN_ERR BFX "no p states to transition\n"); |
return -ENODEV; |
} |
if (batps == 0) { |
batps = numps; |
} else if (batps > numps) { |
printk(KERN_ERR BFX "batterypstates > numpstates\n"); |
batps = numps; |
} else { |
printk(KERN_ERR PFX |
"Restricting operation to %d p-states\n", batps); |
printk(KERN_ERR PFX |
"Check for an updated driver to access all " |
"%d p-states\n", numps); |
} |
if ((numps <= 1) || (batps <= 1)) { |
printk(KERN_ERR PFX "only 1 p-state to transition\n"); |
return -ENODEV; |
} |
ppst = kmalloc(sizeof (struct pst_s) * numps, GFP_KERNEL); |
if (!ppst) { |
printk(KERN_ERR PFX "ppst memory alloc failure\n"); |
return -ENOMEM; |
} |
pst = (struct pst_s *) (psb + 1); |
for (j = 0; j < numps; j++) { |
ppst[j].fid = pst[j].fid; |
ppst[j].vid = pst[j].vid; |
printk(KERN_INFO PFX |
" %d : fid 0x%x, vid 0x%x\n", j, |
ppst[j].fid, ppst[j].vid); |
} |
sort_pst(ppst, numps); |
lastfid = ppst[0].fid; |
if (lastfid > LO_FID_TABLE_TOP) |
printk(KERN_INFO BFX "first fid not in lo freq tbl\n"); |
if ((lastfid > MAX_FID) || (lastfid & 1) || (ppst[0].vid > LEAST_VID)) { |
printk(KERN_ERR BFX "first fid/vid bad (0x%x - 0x%x)\n", |
lastfid, ppst[0].vid); |
kfree(ppst); |
return -ENODEV; |
} |
for (j = 1; j < numps; j++) { |
if ((lastfid >= ppst[j].fid) |
|| (ppst[j].fid & 1) |
|| (ppst[j].fid < HI_FID_TABLE_BOTTOM) |
|| (ppst[j].fid > MAX_FID) |
|| (ppst[j].vid > LEAST_VID)) { |
printk(KERN_ERR BFX |
"invalid fid/vid in pst(%x %x)\n", |
ppst[j].fid, ppst[j].vid); |
kfree(ppst); |
return -ENODEV; |
} |
lastfid = ppst[j].fid; |
} |
for (j = 0; j < numps; j++) { |
if (ppst[j].vid < rvo) { /* vid+rvo >= 0 */ |
printk(KERN_ERR BFX |
"0 vid exceeded with pstate %d\n", j); |
return -ENODEV; |
} |
if (ppst[j].vid < maxvid+rvo) { /* vid+rvo >= maxvid */ |
printk(KERN_ERR BFX |
"maxvid exceeded with pstate %d\n", j); |
return -ENODEV; |
} |
} |
if (query_current_values_with_pending_wait()) { |
kfree(ppst); |
return -EIO; |
} |
printk(KERN_INFO PFX "currfid 0x%x, currvid 0x%x\n", |
currfid, currvid); |
for (j = 0; j < numps; j++) |
if ((ppst[j].fid==currfid) && (ppst[j].vid==currvid)) |
return (0); |
printk(KERN_ERR BFX "currfid/vid do not match PST, ignoring\n"); |
return 0; |
} |
printk(KERN_ERR BFX "no PSB\n"); |
return -ENODEV; |
} |
/* Converts a frequency (that might not necessarily be a multiple of 200) */ |
/* to a fid. */ |
static u32 find_closest_fid(u32 freq, int searchup) |
{ |
if (searchup == SEARCH_UP) |
freq += MIN_FREQ_RESOLUTION - 1; |
freq = (freq / MIN_FREQ_RESOLUTION) * MIN_FREQ_RESOLUTION; |
if (freq < MIN_FREQ) |
freq = MIN_FREQ; |
else if (freq > MAX_FREQ) |
freq = MAX_FREQ; |
return find_fid_from_freq(freq); |
} |
/* Map a requested frequency plus policy min/max onto concrete PST
 * entries.  On success, rewrites *ptargfreq/*pmin/*pmax to the chosen
 * table frequencies and (optionally) returns the fid/vid to program.
 * Returns 1 if no entry could be selected. */
static int find_match(u32 * ptargfreq, u32 * pmin, u32 * pmax, int searchup, u32 * pfid, u32 * pvid)
{
	u32 availpstates = batps;	/* battery limit caps usable states */
	u32 targfid = find_closest_fid(*ptargfreq, searchup);
	u32 minfid = find_closest_fid(*pmin, SEARCH_DOWN);
	u32 maxfid = find_closest_fid(*pmax, SEARCH_UP);
	u32 minidx = 0;
	u32 maxidx = availpstates - 1;
	u32 targidx = 0xffffffff;	/* sentinel: no match yet */
	int i;
	dprintk(KERN_DEBUG PFX "find match: freq %d MHz, min %d, max %d\n",
		*ptargfreq, *pmin, *pmax);
	/* Restrict values to the frequency choices in the PST */
	if (minfid < ppst[0].fid)
		minfid = ppst[0].fid;
	if (maxfid > ppst[maxidx].fid)
		maxfid = ppst[maxidx].fid;
	/* Find appropriate PST index for the minimum fid */
	for (i = 0; i < (int) availpstates; i++) {
		if (minfid >= ppst[i].fid)
			minidx = i;
	}
	/* Find appropriate PST index for the maximum fid */
	for (i = availpstates - 1; i >= 0; i--) {
		if (maxfid <= ppst[i].fid)
			maxidx = i;
	}
	if (minidx > maxidx)
		maxidx = minidx;
	/* Frequency ids are now constrained by limits matching PST entries */
	minfid = ppst[minidx].fid;
	maxfid = ppst[maxidx].fid;
	/* Limit the target frequency to these limits */
	if (targfid < minfid)
		targfid = minfid;
	else if (targfid > maxfid)
		targfid = maxfid;
	/* Find the best target index into the PST, constrained by the range */
	if (searchup == SEARCH_UP) {
		/* scan downward so we keep the lowest entry >= target */
		for (i = maxidx; i >= (int) minidx; i--) {
			if (targfid <= ppst[i].fid)
				targidx = i;
		}
	} else {
		/* scan upward so we keep the highest entry <= target */
		for (i = minidx; i <= (int) maxidx; i++) {
			if (targfid >= ppst[i].fid)
				targidx = i;
		}
	}
	if (targidx == 0xffffffff) {
		printk(KERN_ERR PFX "could not find target\n");
		return 1;
	}
	*pmin = find_freq_from_fid(minfid);
	*pmax = find_freq_from_fid(maxfid);
	*ptargfreq = find_freq_from_fid(ppst[targidx].fid);
	if (pfid)
		*pfid = ppst[targidx].fid;
	if (pvid)
		*pvid = ppst[targidx].vid;
	return 0;
}
/* Take a frequency (plus policy min/max), resolve it to a PST fid/vid
 * pair, and issue the fid/vid transition command.  Returns 0 on
 * success (including the no-op case), 1 on failure. */
static inline int transition_frequency(u32 * preq, u32 * pmin, u32 * pmax, u32 searchup)
{
	u32 fid;
	u32 vid;
	int res;
	struct cpufreq_freqs freqs;
	if (find_match(preq, pmin, pmax, searchup, &fid, &vid))
		return 1;
	dprintk(KERN_DEBUG PFX "table matched fid 0x%x, giving vid 0x%x\n",
		fid, vid);
	if (query_current_values_with_pending_wait())
		return 1;
	/* nothing to do if we are already at the target operating point */
	if ((currvid == vid) && (currfid == fid)) {
		dprintk(KERN_DEBUG PFX
			"target matches current values (fid 0x%x, vid 0x%x)\n",
			fid, vid);
		return 0;
	}
	/* transitions within the low fid table are not permitted */
	if ((fid < HI_FID_TABLE_BOTTOM) && (currfid < HI_FID_TABLE_BOTTOM)) {
		printk(KERN_ERR PFX
		       "ignoring illegal change in lo freq table-%x to %x\n",
		       currfid, fid);
		return 1;
	}
	dprintk(KERN_DEBUG PFX "changing to fid 0x%x, vid 0x%x\n", fid, vid);
	freqs.cpu = 0;	/* only true because SMP not supported */
	freqs.old = find_freq_from_fid(currfid);
	freqs.new = find_freq_from_fid(fid);
	//!!!cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	res = transition_fid_vid(fid, vid);
	/* report the fid actually reached, not the one requested */
	freqs.new = find_freq_from_fid(currfid);
	//!!!cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	return res;
}
/* Driver entry point to switch to the target frequency */ |
static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) |
{ |
u32 checkfid = currfid; |
u32 checkvid = currvid; |
u32 reqfreq = targfreq / 1000; |
u32 minfreq = pol->min / 1000; |
u32 maxfreq = pol->max / 1000; |
if (ppst == 0) { |
printk(KERN_ERR PFX "targ: ppst 0\n"); |
return -ENODEV; |
} |
if (pending_bit_stuck()) { |
printk(KERN_ERR PFX "drv targ fail: change pending bit set\n"); |
return -EIO; |
} |
dprintk(KERN_DEBUG PFX "targ: %d kHz, min %d, max %d, relation %d\n", |
targfreq, pol->min, pol->max, relation); |
if (query_current_values_with_pending_wait()) |
return -EIO; |
dprintk(KERN_DEBUG PFX "targ: curr fid 0x%x, vid 0x%x\n", |
currfid, currvid); |
if ((checkvid != currvid) || (checkfid != currfid)) { |
printk(KERN_ERR PFX |
"error - out of sync, fid 0x%x 0x%x, vid 0x%x 0x%x\n", |
checkfid, currfid, checkvid, currvid); |
} |
if (transition_frequency(&reqfreq, &minfreq, &maxfreq, |
relation == |
CPUFREQ_RELATION_H ? SEARCH_UP : SEARCH_DOWN)) |
{ |
printk(KERN_ERR PFX "transition frequency failed\n"); |
return 1; |
} |
pol->cur = 1000 * find_freq_from_fid(currfid); |
return 0; |
} |
/* Driver entry point to verify the policy and range of frequencies */
static int powernowk8_verify(struct cpufreq_policy *pol)
{
	u32 min = pol->min / 1000;	/* policy fields are 1000x the driver's units */
	u32 max = pol->max / 1000;
	u32 targ = min;
	int res;

	/* The PST must have been parsed out of the BIOS PSB at init time. */
	if (ppst == 0) {
		printk(KERN_ERR PFX "verify - ppst 0\n");
		return -ENODEV;
	}

	/* A stuck "change pending" bit means the hardware is unusable. */
	if (pending_bit_stuck()) {
		printk(KERN_ERR PFX "failing verify, change pending bit set\n");
		return -EIO;
	}

	dprintk(KERN_DEBUG PFX
		"ver: cpu%d, min %d, max %d, cur %d, pol %d\n", pol->cpu,
		pol->min, pol->max, pol->cur, pol->policy);

	/* This driver only supports the boot processor. */
	if (pol->cpu != 0) {
		printk(KERN_ERR PFX "verify - cpu not 0\n");
		return -ENODEV;
	}

#warning pol->policy is in undefined state here
	/* Clamp min/max onto actual PST entries; search direction follows
	 * the policy (POWERSAVE prefers lower frequencies).  NOTE(review):
	 * per the #warning above, pol->policy may not be initialized at
	 * this point -- confirm the caller's contract. */
	res = find_match(&targ, &min, &max,
			 pol->policy == CPUFREQ_POLICY_POWERSAVE ?
			 SEARCH_DOWN : SEARCH_UP, 0, 0);
	if (!res) {
		/* Write the constrained limits back, scaled to policy units. */
		pol->min = min * 1000;
		pol->max = max * 1000;
	}
	return res;
}
/* per CPU init entry point to the driver */
static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
{
	/* Only the boot CPU is supported (UP driver). */
	if (pol->cpu != 0) {
		printk(KERN_ERR PFX "init not cpu 0\n");
		return -ENODEV;
	}

	pol->governor = 0;	//!!!CPUFREQ_DEFAULT_GOVERNOR;

	/* Take a crude guess here.
	 * rvo, vstable and irt presumably come from the BIOS PSB parsed
	 * earlier -- TODO confirm units against the AMD documentation. */
	pol->cpuinfo.transition_latency = ((rvo + 8) * vstable * VST_UNITS_20US)
	    + (3 * (1 << irt) * 10);

	if (query_current_values_with_pending_wait())
		return -EIO;

	pol->cur = 1000 * find_freq_from_fid(currfid);
	dprintk(KERN_DEBUG PFX "policy current frequency %d kHz\n", pol->cur);

	/* min/max the cpu is capable of */
	pol->cpuinfo.min_freq = 1000 * find_freq_from_fid(ppst[0].fid);
	pol->cpuinfo.max_freq = 1000 * find_freq_from_fid(ppst[numps-1].fid);
	pol->min = 1000 * find_freq_from_fid(ppst[0].fid);
	/* NOTE(review): pol->max indexes with batps while cpuinfo.max_freq
	 * uses numps -- confirm this asymmetry is intentional (battery
	 * limited p-state count vs total p-state count?). */
	pol->max = 1000 * find_freq_from_fid(ppst[batps - 1].fid);

	printk(KERN_INFO PFX "cpu_init done, current fid 0x%x, vid 0x%x\n",
	       currfid, currvid);

	return 0;
}
/* driver entry point for init */ |
/*static*/ int __init powernowk8_init(void) |
{ |
int rc; |
dprintk(KERN_INFO PFX VERSION "\n"); |
if (check_supported_cpu() == 0) |
return -ENODEV; |
rc = find_psb_table(); |
if (rc) |
return rc; |
if (pending_bit_stuck()) { |
printk(KERN_ERR PFX "powernowk8_init fail, change pending bit set\n"); |
kfree(ppst); |
return -EIO; |
} |
return cpufreq_register_driver(&cpufreq_amd64_driver); |
} |
/* driver entry point for term */ |
/*static*/ void __exit powernowk8_exit(void) |
{ |
dprintk(KERN_INFO PFX "powernowk8_exit\n"); |
cpufreq_unregister_driver(&cpufreq_amd64_driver); |
kfree(ppst); |
} |
MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com>"); |
MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver."); |
MODULE_LICENSE("GPL"); |
module_init(powernowk8_init); |
module_exit(powernowk8_exit); |
/shark/trunk/drivers/cpu/cpufreq/speedstep-smi.c |
---|
0,0 → 1,364 |
/* |
* Intel SpeedStep SMI driver. |
* |
* (C) 2003 Hiroshi Miura <miura@da-cha.org> |
* |
* Licensed under the terms of the GNU GPL License version 2. |
* |
*/ |
/********************************************************************* |
* SPEEDSTEP - DEFINITIONS * |
*********************************************************************/ |
#include <linuxcomp.h> |
#include <linux/kernel.h> |
#include <linux/module.h> |
#include <linux/moduleparam.h> |
#include <linux/init.h> |
#include <linux/cpufreq.h> |
#include <linux/pci.h> |
#include <linux/slab.h> |
#include <asm/ist.h> |
#include "speedstep-lib.h" |
#define PFX "speedstep-smi: " |
/* speedstep system management interface port/command. |
* |
* These parameters are got from IST-SMI BIOS call. |
* If user gives it, these are used. |
* |
*/ |
static int smi_port = 0; |
static int smi_cmd = 0; |
static unsigned int smi_sig = 0; |
/* |
* There are only two frequency states for each processor. Values |
* are in kHz for the time being. |
*/ |
static struct cpufreq_frequency_table speedstep_freqs[] = {
	{SPEEDSTEP_HIGH, 0},	/* .frequency filled in at init by the SMI query */
	{SPEEDSTEP_LOW, 0},
	{0, CPUFREQ_TABLE_END},	/* table terminator */
};
#define GET_SPEEDSTEP_OWNER 0 |
#define GET_SPEEDSTEP_STATE 1 |
#define SET_SPEEDSTEP_STATE 2 |
#define GET_SPEEDSTEP_FREQS 4 |
/* DEBUG |
* Define it if you want verbose debug output, e.g. for bug reporting |
*/ |
#define SPEEDSTEP_DEBUG |
#ifdef SPEEDSTEP_DEBUG |
#define dprintk(msg...) printk(msg) |
#else |
#define dprintk(msg...) do { } while(0) |
#endif |
/**
 * speedstep_smi_ownership - try to acquire ownership of the SMI interface
 *
 * Issues the GET_SPEEDSTEP_OWNER SMI call.  Returns the value the BIOS
 * handler leaves in %edi; callers treat nonzero as failure (see
 * speedstep_cpu_init / speedstep_resume).
 */
static int speedstep_smi_ownership (void)
{
	u32 command, result, magic;
	u32 function = GET_SPEEDSTEP_OWNER;
	/* The BIOS handler is passed the address of this exact string. */
	unsigned char magic_data[] = "Copyright (c) 1999 Intel Corporation";

	/* Command word: BIOS signature in the high bytes, SMI command low. */
	command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
	/* NOTE(review): magic_data lives on the stack; passing its physical
	 * address assumes the SMI handler consumes it synchronously. */
	magic = virt_to_phys(magic_data);

	/* Trigger the SMI by writing the command byte to the SMI port. */
	__asm__ __volatile__(
		"out %%al, (%%dx)\n"
		: "=D" (result)
		: "a" (command), "b" (function), "c" (0), "d" (smi_port), "D" (0), "S" (magic)
	);
	return result;
}
/**
 * speedstep_smi_get_freqs - get SpeedStep preferred & current freq.
 * @low:  out - frequency of the low state, in kHz
 * @high: out - frequency of the high state, in kHz
 *
 * Issues the GET_SPEEDSTEP_FREQS SMI call.  The handler appears to
 * return the two speeds in MHz (%ebx/%ecx), converted to kHz here.
 * Returns the handler's %eax value; callers treat nonzero as failure.
 */
static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high)
{
	u32 command, result, edi, high_mhz, low_mhz;
	u32 state=0;
	u32 function = GET_SPEEDSTEP_FREQS;

	/* Command word: BIOS signature in the high bytes, SMI command low. */
	command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);

	__asm__ __volatile__("movl $0, %%edi\n"
		"out %%al, (%%dx)\n"
		: "=a" (result), "=b" (high_mhz), "=c" (low_mhz), "=d" (state), "=D" (edi)
		: "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0)
	);

	/* MHz -> kHz */
	*high = high_mhz * 1000;
	*low = low_mhz * 1000;

	return result;
}
/**
 * speedstep_get_state - get the current SpeedStep state
 *
 * Issues the GET_SPEEDSTEP_STATE SMI call and returns the state the
 * handler leaves in %ebx (SPEEDSTEP_LOW or SPEEDSTEP_HIGH).
 * (The original header said "set" and documented a @state parameter;
 * this routine takes none and only reads.)
 */
static int speedstep_get_state (void)
{
	u32 function=GET_SPEEDSTEP_STATE;
	u32 result, state, edi, command;

	/* Command word: BIOS signature in the high bytes, SMI command low. */
	command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);

	__asm__ __volatile__("movl $0, %%edi\n"
		"out %%al, (%%dx)\n"
		: "=a" (result), "=b" (state), "=D" (edi)
		: "a" (command), "b" (function), "c" (0), "d" (smi_port), "S" (0)
	);

	return state;
}
/**
 * speedstep_set_state - set the SpeedStep state
 * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
 * @notify: nonzero requests cpufreq transition notifications (the
 *          notification calls are currently commented out below)
 *
 * Issues the SET_SPEEDSTEP_STATE SMI call with IRQs disabled and logs
 * whether the handler confirmed the requested state.
 */
static void speedstep_set_state (unsigned int state, unsigned int notify)
{
	unsigned int old_state, result, command, new_state;
	unsigned long flags;
	struct cpufreq_freqs freqs;
	unsigned int function=SET_SPEEDSTEP_STATE;

	/* Only the two defined states are valid. */
	if (state > 0x1)
		return;

	old_state = speedstep_get_state();
	freqs.old = speedstep_freqs[old_state].frequency;
	freqs.new = speedstep_freqs[state].frequency;
	freqs.cpu = 0; /* speedstep.c is UP only driver */

	/* Already there -- nothing to do. */
	if (old_state == state)
		return;

	//!!!if (notify)
	//!!!cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	/* Disable IRQs */
	local_irq_save(flags);

	/* Command word: BIOS signature in the high bytes, SMI command low. */
	command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);

	/* Trigger the SMI; the handler echoes the resulting state in %ebx. */
	__asm__ __volatile__(
		"movl $0, %%edi\n"
		"out %%al, (%%dx)\n"
		: "=b" (new_state), "=D" (result)
		: "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0)
	);

	/* enable IRQs */
	local_irq_restore(flags);

	if (new_state == state) {
		dprintk(KERN_INFO "cpufreq: change to %u MHz succeded\n", (freqs.new / 1000));
	} else {
		printk(KERN_ERR "cpufreq: change failed\n");
	}

	//!!!if (notify)
	//!!!cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return;
}
/** |
* speedstep_target - set a new CPUFreq policy |
* @policy: new policy |
* @target_freq: new freq |
* @relation: |
* |
* Sets a new CPUFreq policy/freq. |
*/ |
static int speedstep_target (struct cpufreq_policy *policy, |
unsigned int target_freq, unsigned int relation) |
{ |
unsigned int newstate = 0; |
if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) |
return -EINVAL; |
speedstep_set_state(newstate, 1); |
return 0; |
} |
/** |
* speedstep_verify - verifies a new CPUFreq policy |
* @freq: new policy |
* |
* Limit must be within speedstep_low_freq and speedstep_high_freq, with |
* at least one border included. |
*/ |
static int speedstep_verify (struct cpufreq_policy *policy) |
{ |
return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); |
} |
/* Per-CPU init: acquire the SMI interface, discover the two supported
 * frequencies (with a library fallback), and populate the policy. */
static int speedstep_cpu_init(struct cpufreq_policy *policy)
{
	int result;
	unsigned int speed,state;

	/* capability check -- UP driver, boot CPU only */
	if (policy->cpu != 0)
		return -ENODEV;

	/* Ownership failure is only logged: the frequency query below may
	 * still succeed. */
	result = speedstep_smi_ownership();
	if (result)
		dprintk(KERN_INFO "cpufreq: fails an aquiring ownership of a SMI interface.\n");

	/* detect low and high frequency */
	result = speedstep_smi_get_freqs(&speedstep_freqs[SPEEDSTEP_LOW].frequency,
					 &speedstep_freqs[SPEEDSTEP_HIGH].frequency);
	if (result) {
		/* fall back to speedstep_lib.c dection mechanism: try both states out */
		unsigned int speedstep_processor = speedstep_detect_processor();

		dprintk(KERN_INFO PFX "could not detect low and high frequencies by SMI call.\n");
		if (!speedstep_processor)
			return -ENODEV;

		result = speedstep_get_freqs(speedstep_processor,
					     &speedstep_freqs[SPEEDSTEP_LOW].frequency,
					     &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
					     &speedstep_set_state);
		if (result) {
			dprintk(KERN_INFO PFX "could not detect two different speeds -- aborting.\n");
			return result;
		} else
			dprintk(KERN_INFO PFX "workaround worked.\n");
	}

	/* get current speed setting */
	state = speedstep_get_state();
	speed = speedstep_freqs[state].frequency;

	dprintk(KERN_INFO "cpufreq: currently at %s speed setting - %i MHz\n",
		(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high",
		(speed / 1000));

	/* cpuinfo and default policy values */
	policy->governor = 0;	//!!!CPUFREQ_DEFAULT_GOVERNOR;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	policy->cur = speed;

	/* Derive min/max limits from the (now filled-in) two-entry table. */
	return cpufreq_frequency_table_cpuinfo(policy, &speedstep_freqs[0]);
}
static int speedstep_resume(struct cpufreq_policy *policy) |
{ |
int result = speedstep_smi_ownership(); |
if (result) |
dprintk(KERN_INFO "cpufreq: fails an aquiring ownership of a SMI interface.\n"); |
return result; |
} |
/* Hook table handed to the cpufreq core.  Note that no .exit hook is
 * provided by this driver. */
static struct cpufreq_driver speedstep_driver = {
	.name		= "speedstep-smi",
	.verify		= speedstep_verify,
	.target		= speedstep_target,
	.init		= speedstep_cpu_init,
	.resume		= speedstep_resume,
};
/**
 * speedstep_init - initializes the SpeedStep CPUFreq driver
 *
 * Initializes the SpeedStep support. Returns -ENODEV on unsupported
 * BIOS, -EINVAL on problems during initiatization, and zero on
 * success.
 */
/*static*/ int __init speedstep_smi_init(void)
{
	struct cpuinfo_x86 *c = &new_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL) {
		dprintk (KERN_INFO PFX "No Intel CPU detected.\n");
		return -ENODEV;
	}

	dprintk(KERN_DEBUG PFX "signature:0x%.8lx, command:0x%.8lx, event:0x%.8lx, perf_level:0x%.8lx.\n",
		ist_info.signature, ist_info.command, ist_info.event, ist_info.perf_level);

	/* Error if no IST-SMI BIOS or no PARM
	   sig= 'ISGE' aka 'Intel Speedstep Gate E'
	   NOTE(review): 0x47534943 is the bytes "CISG" in (little-endian)
	   memory order, which does not match the 'ISGE' claim above --
	   confirm against the IST specification. */
	if ((ist_info.signature != 0x47534943) && (
	    (smi_port == 0) || (smi_cmd == 0)))
		return -ENODEV;

	/* smi_sig == 1 means "fake the IST signature" (see module param). */
	if (smi_sig == 1)
		smi_sig = 0x47534943;
	else
		smi_sig = ist_info.signature;

	/* setup smi_port from MODLULE_PARM or BIOS */
	if ((smi_port > 0xff) || (smi_port < 0)) {
		return -EINVAL;
	} else if (smi_port == 0) {
		/* low byte of the BIOS command word is the SMI port */
		smi_port = ist_info.command & 0xff;
	}

	if ((smi_cmd > 0xff) || (smi_cmd < 0)) {
		return -EINVAL;
	} else if (smi_cmd == 0) {
		/* bits 23:16 of the BIOS command word are the SMI command */
		smi_cmd = (ist_info.command >> 16) & 0xff;
	}

	return cpufreq_register_driver(&speedstep_driver);
}
/**
 * speedstep_exit - unregisters SpeedStep support
 *
 * Unregisters SpeedStep support.
 */
/*static*/ void __exit speedstep_smi_exit(void)
{
	/* Drop the registration made in speedstep_smi_init(). */
	cpufreq_unregister_driver(&speedstep_driver);
}
module_param(smi_port, int, 0444); |
module_param(smi_cmd, int, 0444); |
module_param(smi_sig, uint, 0444); |
MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value -- Intel's default setting is 0xb2"); |
MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value -- Intel's default setting is 0x82"); |
MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the SMI interface."); |
MODULE_AUTHOR ("Hiroshi Miura"); |
MODULE_DESCRIPTION ("Speedstep driver for IST applet SMI interface."); |
MODULE_LICENSE ("GPL"); |
module_init(speedstep_smi_init); |
module_exit(speedstep_smi_exit); |
/shark/trunk/drivers/cpu/cpufreq/freq_table.c |
---|
0,0 → 1,203 |
/* |
* linux/drivers/cpufreq/freq_table.c |
* |
* Copyright (C) 2002 - 2003 Dominik Brodowski |
*/ |
#include <linux/kernel.h> |
#include <linux/module.h> |
#include <linux/init.h> |
#include <linux/cpufreq.h> |
/********************************************************************* |
* FREQUENCY TABLE HELPERS * |
*********************************************************************/ |
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, |
struct cpufreq_frequency_table *table) |
{ |
unsigned int min_freq = ~0; |
unsigned int max_freq = 0; |
unsigned int i = 0; |
for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { |
unsigned int freq = table[i].frequency; |
if (freq == CPUFREQ_ENTRY_INVALID) |
continue; |
if (freq < min_freq) |
min_freq = freq; |
if (freq > max_freq) |
max_freq = freq; |
} |
policy->min = policy->cpuinfo.min_freq = min_freq; |
policy->max = policy->cpuinfo.max_freq = max_freq; |
if (policy->min == ~0) |
return -EINVAL; |
else |
return 0; |
} |
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo); |
/* Validate a policy against a frequency table: clamp its range to the
 * hardware limits and guarantee at least one selectable table entry. */
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table)
{
	unsigned int next_larger = ~0;	/* smallest table freq above policy->max */
	unsigned int i = 0;
	unsigned int count = 0;		/* valid entries inside [min, max] */

	if (!cpu_online(policy->cpu))
		return -EINVAL;

	/* First clamp the requested range to the hardware capabilities. */
	cpufreq_verify_within_limits(policy,
				     policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if ((freq >= policy->min) && (freq <= policy->max))
			count++;
		else if ((next_larger > freq) && (freq > policy->max))
			next_larger = freq;
	}

	/* No entry fell inside the range: widen it upward to the nearest
	 * supported frequency so at least one entry becomes selectable. */
	if (!count)
		policy->max = next_larger;

	/* Re-clamp in case next_larger exceeded the hardware maximum. */
	cpufreq_verify_within_limits(policy,
				     policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
int cpufreq_frequency_table_target(struct cpufreq_policy *policy, |
struct cpufreq_frequency_table *table, |
unsigned int target_freq, |
unsigned int relation, |
unsigned int *index) |
{ |
struct cpufreq_frequency_table optimal = { .index = ~0, }; |
struct cpufreq_frequency_table suboptimal = { .index = ~0, }; |
unsigned int i; |
switch (relation) { |
case CPUFREQ_RELATION_H: |
optimal.frequency = 0; |
suboptimal.frequency = ~0; |
break; |
case CPUFREQ_RELATION_L: |
optimal.frequency = ~0; |
suboptimal.frequency = 0; |
break; |
} |
if (!cpu_online(policy->cpu)) |
return -EINVAL; |
for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { |
unsigned int freq = table[i].frequency; |
if (freq == CPUFREQ_ENTRY_INVALID) |
continue; |
if ((freq < policy->min) || (freq > policy->max)) |
continue; |
switch(relation) { |
case CPUFREQ_RELATION_H: |
if (freq <= target_freq) { |
if (freq >= optimal.frequency) { |
optimal.frequency = freq; |
optimal.index = i; |
} |
} else { |
if (freq <= suboptimal.frequency) { |
suboptimal.frequency = freq; |
suboptimal.index = i; |
} |
} |
break; |
case CPUFREQ_RELATION_L: |
if (freq >= target_freq) { |
if (freq <= optimal.frequency) { |
optimal.frequency = freq; |
optimal.index = i; |
} |
} else { |
if (freq >= suboptimal.frequency) { |
suboptimal.frequency = freq; |
suboptimal.index = i; |
} |
} |
break; |
} |
} |
if (optimal.index > i) { |
if (suboptimal.index > i) |
return -EINVAL; |
*index = suboptimal.index; |
} else |
*index = optimal.index; |
return 0; |
} |
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target); |
/* Per-CPU pointer to the driver's frequency table, registered through
 * cpufreq_frequency_table_get_attr(). */
static struct cpufreq_frequency_table *show_table[NR_CPUS];

/**
 * show_available_freqs - show the supported frequencies for the specified CPU
 * (the original header said "show_scaling_governor" - a copy/paste slip)
 *
 * Writes each valid table frequency into @buf, space separated and
 * newline terminated; returns the byte count, or -ENODEV when no table
 * was registered for this CPU.
 */
static ssize_t show_available_freqs (struct cpufreq_policy *policy, char *buf)
{
	unsigned int i = 0;
	unsigned int cpu = policy->cpu;
	ssize_t count = 0;
	struct cpufreq_frequency_table *table;

	if (!show_table[cpu])
		return -ENODEV;

	table = show_table[cpu];

	for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;
		count += sprintf26(&buf[count], "%d ", table[i].frequency);
	}

	count += sprintf26(&buf[count], "\n");

	return count;
}
/* Read-only "scaling_available_frequencies" attribute backed by
 * show_available_freqs() above. */
struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
	.attr = { .name = "scaling_available_frequencies", .mode = 0444 },
	.show = show_available_freqs,
};
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
/*
 * if you use these, you must assure that the frequency table is valid
 * all the time between get_attr and put_attr!
 */
/* Publish @table as the frequency table for @cpu; the caller retains
 * ownership and must keep it alive until the matching put_attr call. */
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
				      unsigned int cpu)
{
	show_table[cpu] = table;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
/* Withdraw the table published for @cpu (reverse of get_attr). */
void cpufreq_frequency_table_put_attr(unsigned int cpu)
{
	show_table[cpu] = NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>"); |
MODULE_DESCRIPTION ("CPUfreq frequency table helpers"); |
MODULE_LICENSE ("GPL"); |
/shark/trunk/drivers/cpu/cpufreq/powernow-k7.h |
---|
0,0 → 1,44 |
/* |
* $Id: powernow-k7.h,v 1.1 2004-04-23 14:24:13 mauro Exp $ |
* (C) 2003 Dave Jones. |
* |
* Licensed under the terms of the GNU GPL License version 2. |
* |
* AMD-specific information |
* |
*/ |
/* FIDVID control MSR layout.  Field meanings follow the field names;
 * confirm semantics against the AMD processor documentation. */
union msr_fidvidctl {
	struct {
		unsigned FID:5,		// 4:0
		reserved1:3,		// 7:5
		VID:5,			// 12:8
		reserved2:3,		// 15:13
		FIDC:1,			// 16
		VIDC:1,			// 17
		reserved3:2,		// 19:18
		FIDCHGRATIO:1,		// 20
		reserved4:11,		// 31:21
		SGTC:20,		// 51:32 (was written "32:51")
		reserved5:12;		// 63:52
	} bits;
	unsigned long long val;		// raw 64-bit MSR image
};
/* FIDVID status MSR layout.  The C/S/M prefixes presumably mean
 * current/startup/max -- confirm against the AMD documentation. */
union msr_fidvidstatus {
	struct {
		unsigned CFID:5,	// 4:0
		reserved1:3,		// 7:5
		SFID:5,			// 12:8
		reserved2:3,		// 15:13
		MFID:5,			// 20:16
		reserved3:11,		// 31:21
		CVID:5,			// 36:32
		reserved4:3,		// 39:37
		SVID:5,			// 44:40
		reserved5:3,		// 47:45
		MVID:5,			// 52:48
		reserved6:11;		// 63:53
	} bits;
	unsigned long long val;		// raw 64-bit MSR image
};
/shark/trunk/drivers/cpu/cpufreq/powernow-k8.h |
---|
0,0 → 1,126 |
/* |
* (c) 2003 Advanced Micro Devices, Inc. |
* Your use of this code is subject to the terms and conditions of the |
* GNU general public license version 2. See "../../../COPYING" or |
* http://www.gnu.org/licenses/gpl.html |
*/ |
/* processor's cpuid instruction support */ |
#define CPUID_PROCESSOR_SIGNATURE 1 /* function 1 */ |
#define CPUID_F1_FAM 0x00000f00 /* family mask */ |
#define CPUID_F1_XFAM 0x0ff00000 /* extended family mask */ |
#define CPUID_F1_MOD 0x000000f0 /* model mask */ |
#define CPUID_F1_STEP 0x0000000f /* stepping level mask */ |
#define CPUID_XFAM_MOD 0x0ff00ff0 /* xtended fam, fam + model */ |
#define ATHLON64_XFAM_MOD 0x00000f40 /* xtended fam, fam + model */ |
#define OPTERON_XFAM_MOD 0x00000f50 /* xtended fam, fam + model */ |
#define ATHLON64_REV_C0 8 |
#define CPUID_GET_MAX_CAPABILITIES 0x80000000 |
#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007 |
#define P_STATE_TRANSITION_CAPABLE 6 |
/* Model Specific Registers for p-state transitions. MSRs are 64-bit. For */
/* writes (wrmsr - opcode 0f 30), the register number is placed in ecx, and */
/* the value to write is placed in edx:eax. For reads (rdmsr - opcode 0f 32), */
/* the register number is placed in ecx, and the data is returned in edx:eax. */
#define MSR_FIDVID_CTL 0xc0010041
#define MSR_FIDVID_STATUS 0xc0010042

/* Field definitions within the FID VID Low Control MSR : */
#define MSR_C_LO_INIT_FID_VID 0x00010000
#define MSR_C_LO_NEW_VID 0x00001f00
/* NOTE(review): 0x2f looks suspect -- every other fid mask in this file
 * is 6 contiguous bits (0x3f); confirm against the AMD documentation. */
#define MSR_C_LO_NEW_FID 0x0000002f
#define MSR_C_LO_VID_SHIFT 8

/* Field definitions within the FID VID High Control MSR : */
#define MSR_C_HI_STP_GNT_TO 0x000fffff

/* Field definitions within the FID VID Low Status MSR : */
#define MSR_S_LO_CHANGE_PENDING 0x80000000 /* cleared when completed */
#define MSR_S_LO_MAX_RAMP_VID 0x1f000000
#define MSR_S_LO_MAX_FID 0x003f0000
#define MSR_S_LO_START_FID 0x00003f00
#define MSR_S_LO_CURRENT_FID 0x0000003f

/* Field definitions within the FID VID High Status MSR : */
#define MSR_S_HI_MAX_WORKING_VID 0x001f0000
#define MSR_S_HI_START_VID 0x00001f00
#define MSR_S_HI_CURRENT_VID 0x0000001f
/* fids (frequency identifiers) are arranged in 2 tables - lo and hi */ |
#define LO_FID_TABLE_TOP 6 |
#define HI_FID_TABLE_BOTTOM 8 |
#define LO_VCOFREQ_TABLE_TOP 1400 /* corresponding vco frequency values */ |
#define HI_VCOFREQ_TABLE_BOTTOM 1600 |
#define MIN_FREQ_RESOLUTION 200 /* fids jump by 2 matching freq jumps by 200 */ |
#define MAX_FID 0x2a /* Spec only gives FID values as far as 5 GHz */ |
#define LEAST_VID 0x1e /* Lowest (numerically highest) useful vid value */ |
#define MIN_FREQ 800 /* Min and max freqs, per spec */ |
#define MAX_FREQ 5000 |
#define INVALID_FID_MASK 0xffffffc1 /* not a valid fid if these bits are set */ |
#define INVALID_VID_MASK 0xffffffe0 /* not a valid vid if these bits are set */ |
#define STOP_GRANT_5NS 1 /* min poss memory access latency for voltage change */ |
#define PLL_LOCK_CONVERSION (1000/5) /* ms to ns, then divide by clock period */ |
#define MAXIMUM_VID_STEPS 1 /* Current cpus only allow a single step of 25mV */ |
#define VST_UNITS_20US 20 /* Voltage Stabalization Time is in units of 20us */ |
/*
   Version 1.4 of the PSB table. This table is constructed by BIOS and is
   to tell the OS's power management driver which VIDs and FIDs are
   supported by this particular processor. This information is obtained from
   the data sheets for each processor model by the system vendor and
   incorporated into the BIOS.
   If the data in the PSB / PST is wrong, then this driver will program the
   wrong values into hardware, which is very likely to lead to a crash.
 */
struct psb_s {
	u8 signature[10];		/* must equal PSB_ID_STRING */
	u8 tableversion;		/* expected: PSB_VERSION_1_4 */
	u8 flags1;
	u16 voltagestabilizationtime;	/* see VST_UNITS_20US */
	u8 flags2;
	u8 numpst;			/* number of PST blocks that follow */
	u32 cpuid;
	u8 plllocktime;
	u8 maxfid;
	u8 maxvid;
	u8 numpstates;			/* fid/vid pairs per PST */
};
/* Pairs of fid/vid values are appended to the version 1.4 PSB table. */
struct pst_s {
	u8 fid;		/* frequency identifier */
	u8 vid;		/* voltage identifier */
};
#ifdef DEBUG |
#define dprintk(msg...) printk(msg) |
#else |
#define dprintk(msg...) do { } while(0) |
#endif |
static inline int core_voltage_pre_transition(u32 reqvid); |
static inline int core_voltage_post_transition(u32 reqvid); |
static inline int core_frequency_transition(u32 reqfid); |
static int powernowk8_verify(struct cpufreq_policy *pol); |
static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, |
unsigned relation); |
static int __init powernowk8_cpu_init(struct cpufreq_policy *pol); |
/shark/trunk/drivers/cpu/cpufreq/speedstep-lib.h |
---|
0,0 → 1,41 |
/* |
* (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> |
* |
* Licensed under the terms of the GNU GPL License version 2. |
* |
* Library for common functions for Intel SpeedStep v.1 and v.2 support |
* |
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* |
*/ |
/* processors */ |
#define SPEEDSTEP_PROCESSOR_PIII_C_EARLY 0x00000001 /* Coppermine core */ |
#define SPEEDSTEP_PROCESSOR_PIII_C 0x00000002 /* Coppermine core */ |
#define SPEEDSTEP_PROCESSOR_PIII_T 0x00000003 /* Tualatin core */ |
#define SPEEDSTEP_PROCESSOR_P4M 0x00000004 /* P4-M with 100 MHz FSB */ |
/* speedstep states -- only two of them */ |
#define SPEEDSTEP_HIGH 0x00000000 |
#define SPEEDSTEP_LOW 0x00000001 |
/* detect a speedstep-capable processor */ |
extern unsigned int speedstep_detect_processor (void); |
/* detect the current speed (in khz) of the processor */ |
extern unsigned int speedstep_get_processor_frequency(unsigned int processor); |
/* detect the low and high speeds of the processor. The callback |
* set_state"'s first argument is either SPEEDSTEP_HIGH or |
* SPEEDSTEP_LOW; the second argument is zero so that no |
* cpufreq_notify_transition calls are initiated. |
*/ |
extern unsigned int speedstep_get_freqs(unsigned int processor, |
unsigned int *low_speed, |
unsigned int *high_speed, |
void (*set_state) (unsigned int state, unsigned int notify)); |
/shark/trunk/drivers/cpu/cpufreq/cpufreq.c |
---|
0,0 → 1,139 |
/* |
* Project: S.Ha.R.K. |
* |
* Coordinators: |
* Giorgio Buttazzo <giorgio@sssup.it> |
* Paolo Gai <pj@gandalf.sssup.it> |
* |
* Authors : |
* Mauro Marinoni <mauro.marinoni@unipv.it> |
* |
* |
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy) |
* |
* http://www.sssup.it |
* http://retis.sssup.it |
* http://shark.sssup.it |
*/ |
/* |
* This file was based upon code in Powertweak Linux (http://powertweak.sf.net) |
 * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä, Dominik Brodowski. |
* |
* Licensed under the terms of the GNU GPL License version 2. |
* |
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* |
*/ |
#include <linuxcomp.h> |
#include <linux/config.h> |
#include <linux/kernel.h> |
#include <linux/module.h> |
#include <linux/init.h> |
#include <linux/cpufreq.h> |
#include <linux/delay.h> |
#include <linux/interrupt.h> |
#include <linux/spinlock.h> |
#include <linux/slab.h> |
#include <linux/cpu.h> |
/** |
* The "cpufreq driver" - the arch- or hardware-dependend low |
* level driver of CPUFreq support, and its spinlock. This lock |
* also protects the cpufreq_cpu_data array. |
*/ |
static struct cpufreq_driver *cpufreq_driver; |
static struct cpufreq_policy *cpufreq_cpu_data; |
static spinlock_t cpufreq_driver_lock = SPIN_LOCK_UNLOCKED; |
/*********************************************************************
 *                              USER                                 *
 *********************************************************************/
/* S.Ha.R.K.-facing entry point: forward a frequency request to the
 * registered driver.  NOTE(review): cpufreq_cpu_data is never assigned
 * anywhere in this file, so it appears to be NULL here (caught by the
 * !policy check downstream) -- confirm who initializes it. */
int cpufreq_target(unsigned int target_freq, unsigned int relation)
{
	return cpufreq_driver_target(cpufreq_cpu_data, target_freq, relation);
}
/********************************************************************* |
* GOVERNOR * |
*********************************************************************/ |
int cpufreq_driver_target(struct cpufreq_policy *policy, |
unsigned int target_freq, |
unsigned int relation) |
{ |
unsigned int ret; |
if (!policy) |
return -EINVAL; |
ret = cpufreq_driver->target(policy, target_freq, relation); |
return ret; |
} |
/*********************************************************************
 *              REGISTER / UNREGISTER CPUFREQ DRIVER                 *
 *********************************************************************/
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;

	/* A usable driver needs verify + init and either setpolicy or target. */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	/* Only one driver may be registered at a time. */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* Init & verify - TODO
	 * NOTE(review): cpufreq_cpu_data is never assigned in this file,
	 * so these hooks appear to receive NULL -- confirm who allocates
	 * the policy object. */
	cpufreq_driver->init(cpufreq_cpu_data);
	cpufreq_driver->verify(cpufreq_cpu_data);

	return 0; //sysdev_driver_register(&cpu_sysdev_class,&cpufreq_sysdev_driver);
}
/** |
* cpufreq_unregister_driver - unregister the current CPUFreq driver |
* |
* Unregister the current CPUFreq driver. Only call this if you have |
* the right to do so, i.e. if you have succeeded in initialising before! |
* Returns zero if successful, and -EINVAL if the cpufreq_driver is |
* currently not initialised. |
*/ |
int cpufreq_unregister_driver(struct cpufreq_driver *driver) |
{ |
unsigned long flags; |
if (!cpufreq_driver || (driver != cpufreq_driver)) |
return -EINVAL; |
/* Exit */ |
cpufreq_driver->exit(cpufreq_cpu_data); |
//sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); |
spin_lock_irqsave(&cpufreq_driver_lock, flags); |
cpufreq_driver = NULL; |
spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
return 0; |
} |
/shark/trunk/drivers/cpu/cpufreq/gx-suspmod.c |
---|
0,0 → 1,516 |
/* |
* Cyrix MediaGX and NatSemi Geode Suspend Modulation |
* (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com> |
* (C) 2002 Hiroshi Miura <miura@da-cha.org> |
* All Rights Reserved |
* |
* This program is free software; you can redistribute it and/or |
* modify it under the terms of the GNU General Public License |
* version 2 as published by the Free Software Foundation |
* |
* The author(s) of this software shall not be held liable for damages |
* of any nature resulting due to the use of this software. This |
* software is provided AS-IS with no warranties. |
* |
 * Theoretical note: |
* |
* (see Geode(tm) CS5530 manual (rev.4.1) page.56) |
* |
* CPU frequency control on NatSemi Geode GX1/GXLV processor and CS55x0 |
 * are based on Suspend Modulation. |
* |
* Suspend Modulation works by asserting and de-asserting the SUSP# pin |
* to CPU(GX1/GXLV) for configurable durations. When asserting SUSP# |
* the CPU enters an idle state. GX1 stops its core clock when SUSP# is |
* asserted then power consumption is reduced. |
* |
* Suspend Modulation's OFF/ON duration are configurable |
* with 'Suspend Modulation OFF Count Register' |
* and 'Suspend Modulation ON Count Register'. |
* These registers are 8bit counters that represent the number of |
* 32us intervals which the SUSP# pin is asserted/de-asserted to the |
* processor. |
* |
* These counters define a ratio which is the effective frequency |
* of operation of the system. |
* |
* On Count |
* F_eff = Fgx * ---------------------- |
* On Count + Off Count |
* |
* 0 <= On Count, Off Count <= 255 |
* |
* From these limits, we can get register values |
* |
* on_duration + off_duration <= MAX_DURATION |
* off_duration = on_duration * (stock_freq - freq) / freq |
* |
* on_duration = (freq * DURATION) / stock_freq |
* off_duration = DURATION - on_duration |
* |
* |
*--------------------------------------------------------------------------- |
* |
* ChangeLog: |
* Dec. 11, 2002 Hiroshi Miura <miura@da-cha.org> |
* - rewrite for Cyrix MediaGX Cx5510/5520 and |
* NatSemi Geode Cs5530(A). |
* |
* Jul. ??, 2002 Zwane Mwaikambo <zwane@commfireservices.com> |
* - cs5530_mod patch for 2.4.19-rc1. |
* |
*--------------------------------------------------------------------------- |
* |
* Todo |
* Test on machines with 5510, 5530, 5530A |
*/ |
/************************************************************************ |
* Suspend Modulation - Definitions * |
************************************************************************/ |
#include <linuxcomp.h> |
#include <linux/kernel.h> |
#include <linux/module.h> |
#include <linux/sched.h> |
#include <linux/init.h> |
#include <linux/smp.h> |
#include <linux/cpufreq.h> |
#include <linux/pci.h> |
#include <asm/processor.h> |
#include <asm/errno.h> |
extern struct cpuinfo_x86 new_cpu_data; |
extern unsigned long cpu_khz; |
/* PCI config registers, all at F0 */ |
#define PCI_PMER1 0x80 /* power management enable register 1 */ |
#define PCI_PMER2 0x81 /* power management enable register 2 */ |
#define PCI_PMER3 0x82 /* power management enable register 3 */ |
#define PCI_IRQTC 0x8c /* irq speedup timer counter register:typical 2 to 4ms */ |
#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */ |
#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */ |
#define PCI_MODON 0x95 /* suspend modulation ON counter register */ |
#define PCI_SUSCFG 0x96 /* suspend configuration register */ |
/* PMER1 bits */ |
#define GPM (1<<0) /* global power management */ |
#define GIT (1<<1) /* globally enable PM device idle timers */ |
#define GTR (1<<2) /* globally enable IO traps */ |
#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */ |
#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */ |
/* SUSCFG bits */ |
#define SUSMOD (1<<0) /* enable/disable suspend modulation */ |
/* the bits below are supported only by the CS5530 (after rev.1.2) and CS5530A */
#define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */ |
/* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */ |
#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */ |
/* the bits below are supported only by the CS5530A */
#define PWRSVE_ISA (1<<3) /* stop ISA clock */ |
#define PWRSVE (1<<4) /* active idle */ |
/* Cached CS55x0 companion-chip power-management state plus the PCI
 * device handle, filled in by cpufreq_gx_init(). */
struct gxfreq_params {
	u8 on_duration;		/* PCI_MODON counter (1 unit = 32us) */
	u8 off_duration;	/* PCI_MODOFF counter (1 unit = 32us) */
	u8 pci_suscfg;		/* cached suspend configuration register */
	u8 pci_pmer1;		/* cached power management enable reg 1 */
	u8 pci_pmer2;		/* cached power management enable reg 2 */
	u8 pci_rev;		/* chip revision (low byte of PCI_CLASS_REVISION) */
	struct pci_dev *cs55x0;	/* companion chip: CS5510/5520/5530 */
};
static struct gxfreq_params *gx_params; |
static int stock_freq; |
/* PCI bus clock - defaults to 30.000 if cpu_khz is not available */ |
static int pci_busclk = 0; |
MODULE_PARM(pci_busclk, "i"); |
/* maximum duration for which the cpu may be suspended |
* (32us * MAX_DURATION). If no parameter is given, this defaults |
* to 255. |
* Note that this leads to a maximum of 8 ms(!) where the CPU clock |
* is suspended -- processing power is just 0.39% of what it used to be, |
* though. 781.25 kHz(!) for a 200 MHz processor -- wow. */ |
static int max_duration = 255; |
MODULE_PARM(max_duration, "i"); |
/* For the default policy, we want at least some processing power |
* - let's say 5%. (min = maxfreq / POLICY_MIN_DIV) |
*/ |
#define POLICY_MIN_DIV 20 |
/* DEBUG |
* Define it if you want verbose debug output |
*/ |
#define SUSPMOD_DEBUG 1 |
#ifdef SUSPMOD_DEBUG |
#define dprintk(msg...) printk(KERN_DEBUG "cpufreq:" msg) |
#else |
#define dprintk(msg...) do { } while(0) |
#endif |
/**
 * we can detect a core multiplier from dir0_lsb
 * from GX1 datasheet p.56,
 *	MULT[3:0]:
 *	0000 = SYSCLK multiplied by 4 (test only)
 *	0001 = SYSCLK multiplied by 10
 *	0010 = SYSCLK multiplied by 4
 *	0011 = SYSCLK multiplied by 6
 *	0100 = SYSCLK multiplied by 9
 *	0101 = SYSCLK multiplied by 5
 *	0110 = SYSCLK multiplied by 7
 *	0111 = SYSCLK multiplied by 8
 * of 33.3MHz
 **/
/* indexed by DIR1[3:0]; entries 8..15 are reserved/unknown (0) */
static int gx_freq_mult[16] = {
	4, 10, 4, 6, 9, 5, 7, 8,
	0, 0, 0, 0, 0, 0, 0, 0
};
/**************************************************************** |
* Low Level chipset interface * |
****************************************************************/ |
/* PCI IDs of the supported companion chips: CS5530 (legacy function),
 * CS5520 and CS5510.  Terminated by a zeroed sentinel entry. */
static struct pci_device_id gx_chipset_tbl[] __initdata = {
	{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, PCI_ANY_ID, PCI_ANY_ID },
	{ 0, },
};
/** |
* gx_detect_chipset: |
* |
**/ |
static __init struct pci_dev *gx_detect_chipset(void) |
{ |
struct pci_dev *gx_pci = NULL; |
/* check if CPU is a MediaGX or a Geode. */ |
if ((new_cpu_data.x86_vendor != X86_VENDOR_NSC) && |
(new_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) { |
dprintk(KERN_INFO "gx-suspmod: error: no MediaGX/Geode processor found!\n"); |
return NULL; |
} |
/* detect which companion chip is used */ |
while ((gx_pci = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, gx_pci)) != NULL) { |
if ((pci_match_device (gx_chipset_tbl, gx_pci)) != NULL) { |
return gx_pci; |
} |
} |
dprintk(KERN_INFO "gx-suspmod: error: no supported chipset found!\n"); |
return NULL; |
} |
/** |
* gx_get_cpuspeed: |
* |
* Finds out at which efficient frequency the Cyrix MediaGX/NatSemi Geode CPU runs. |
*/ |
static int gx_get_cpuspeed(void) |
{ |
if ((gx_params->pci_suscfg & SUSMOD) == 0) |
return stock_freq; |
return (stock_freq * gx_params->on_duration) |
/ (gx_params->on_duration + gx_params->off_duration); |
} |
/** |
* gx_validate_speed: |
* determine current cpu speed |
* |
**/ |
static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off_duration) |
{ |
unsigned int i; |
u8 tmp_on, tmp_off; |
int old_tmp_freq = stock_freq; |
int tmp_freq; |
*on_duration=1; |
*off_duration=0; |
for (i=max_duration; i>0; i--) { |
tmp_on = ((khz * i) / stock_freq) & 0xff; |
tmp_off = i - tmp_on; |
tmp_freq = (stock_freq * tmp_on) / i; |
/* if this relation is closer to khz, use this. If it's equal, |
* prefer it, too - lower latency */ |
if (abs(tmp_freq - khz) <= abs(old_tmp_freq - khz)) { |
*on_duration = tmp_on; |
*off_duration = tmp_off; |
old_tmp_freq = tmp_freq; |
} |
} |
return old_tmp_freq; |
} |
/**
 * gx_set_cpuspeed:
 * set cpu speed in khz.
 *
 * Programs the CS55x0 suspend-modulation counters so the effective CPU
 * frequency approximates @khz.  When the validated target equals the
 * full stock frequency, suspend modulation is switched off instead.
 **/
static void gx_set_cpuspeed(unsigned int khz)
{
	u8 suscfg, pmer1;
	unsigned int new_khz;
	unsigned long flags;
	struct cpufreq_freqs freqs;

	freqs.cpu = 0;
	freqs.old = gx_get_cpuspeed();

	/* snap the request to the closest achievable duty-cycle pair */
	new_khz = gx_validate_speed(khz, &gx_params->on_duration, &gx_params->off_duration);

	freqs.new = new_khz;

	if (new_khz == stock_freq) { /* if new khz == 100% of CPU speed, it is special case */
		local_irq_save(flags);
		//!!!cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
		/* clear SUSMOD: CPU runs unthrottled */
		pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, (gx_params->pci_suscfg & ~(SUSMOD)));
		/* read back so the cached copy matches the hardware */
		pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &(gx_params->pci_suscfg));
		local_irq_restore(flags);
		dprintk("suspend modulation disabled: cpu runs 100 percent speed.\n");
		//!!!cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
		return;
	}

	//!!!cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	local_irq_save(flags);

	switch (gx_params->cs55x0->device) {
	case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
		/* suppress throttling during IRQ and video activity */
		pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP;
		/* FIXME: need to test other values -- Zwane,Miura */
		pci_write_config_byte(gx_params->cs55x0, PCI_IRQTC, 4);   /* typical 2 to 4ms */
		pci_write_config_byte(gx_params->cs55x0, PCI_VIDTC, 100); /* typical 50 to 100ms */
		pci_write_config_byte(gx_params->cs55x0, PCI_PMER1, pmer1);

		if (gx_params->pci_rev < 0x10) {	/* CS5530(rev 1.2, 1.3) */
			suscfg = gx_params->pci_suscfg | SUSMOD;
		} else {				/* CS5530A,B.. */
			/* CS5530A additionally supports PWRSVE (active idle) */
			suscfg = gx_params->pci_suscfg | SUSMOD | PWRSVE;
		}
		break;
	case PCI_DEVICE_ID_CYRIX_5520:
	case PCI_DEVICE_ID_CYRIX_5510:
		suscfg = gx_params->pci_suscfg | SUSMOD;
		break;
	default:
		local_irq_restore(flags);
		dprintk("fatal: try to set unknown chipset.\n");
		return;
	}

	/* program the duty cycle first, then enable suspend modulation */
	pci_write_config_byte(gx_params->cs55x0, PCI_MODOFF, gx_params->off_duration);
	pci_write_config_byte(gx_params->cs55x0, PCI_MODON, gx_params->on_duration);

	pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, suscfg);
	/* read back so the cached copy matches the hardware */
	pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg);

	local_irq_restore(flags);

	gx_params->pci_suscfg = suscfg;

	//!!!cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
		gx_params->on_duration * 32, gx_params->off_duration * 32);
	dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new);
}
/**************************************************************** |
* High level functions * |
****************************************************************/ |
/* |
* cpufreq_gx_verify: test if frequency range is valid |
* |
* This function checks if a given frequency range in kHz is valid |
* for the hardware supported by the driver. |
*/ |
static int cpufreq_gx_verify(struct cpufreq_policy *policy) |
{ |
unsigned int tmp_freq = 0; |
u8 tmp1, tmp2; |
if (!stock_freq || !policy) |
return -EINVAL; |
policy->cpu = 0; |
cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); |
/* it needs to be assured that at least one supported frequency is |
* within policy->min and policy->max. If it is not, policy->max |
* needs to be increased until one freuqency is supported. |
* policy->min may not be decreased, though. This way we guarantee a |
* specific processing capacity. |
*/ |
tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2); |
if (tmp_freq < policy->min) |
tmp_freq += stock_freq / max_duration; |
policy->min = tmp_freq; |
if (policy->min > policy->max) |
policy->max = tmp_freq; |
tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2); |
if (tmp_freq > policy->max) |
tmp_freq -= stock_freq / max_duration; |
policy->max = tmp_freq; |
if (policy->max < policy->min) |
policy->max = policy->min; |
cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); |
return 0; |
} |
/* |
* cpufreq_gx_target: |
* |
*/ |
static int cpufreq_gx_target(struct cpufreq_policy *policy, |
unsigned int target_freq, |
unsigned int relation) |
{ |
u8 tmp1, tmp2; |
unsigned int tmp_freq; |
if (!stock_freq || !policy) |
return -EINVAL; |
policy->cpu = 0; |
tmp_freq = gx_validate_speed(target_freq, &tmp1, &tmp2); |
while (tmp_freq < policy->min) { |
tmp_freq += stock_freq / max_duration; |
tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2); |
} |
while (tmp_freq > policy->max) { |
tmp_freq -= stock_freq / max_duration; |
tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2); |
} |
gx_set_cpuspeed(tmp_freq); |
return 0; |
} |
static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) |
{ |
int maxfreq, curfreq; |
if (!policy || policy->cpu != 0) |
return -ENODEV; |
/* determine maximum frequency */ |
if (pci_busclk) { |
maxfreq = pci_busclk * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; |
} else if (cpu_khz) { |
maxfreq = cpu_khz; |
} else { |
maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; |
} |
stock_freq = maxfreq; |
curfreq = gx_get_cpuspeed(); |
dprintk("cpu max frequency is %d.\n", maxfreq); |
dprintk("cpu current frequency is %dkHz.\n",curfreq); |
/* setup basic struct for cpufreq API */ |
policy->cpu = 0; |
if (max_duration < POLICY_MIN_DIV) |
policy->min = maxfreq / max_duration; |
else |
policy->min = maxfreq / POLICY_MIN_DIV; |
policy->max = maxfreq; |
policy->cur = curfreq; |
policy->governor = 0; //!!!CPUFREQ_DEFAULT_GOVERNOR; |
policy->cpuinfo.min_freq = maxfreq / max_duration; |
policy->cpuinfo.max_freq = maxfreq; |
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
return 0; |
} |
/*
 * gx_suspmod_driver:
 * cpufreq driver callbacks for MediaGX/Geode GX suspend modulation.
 */
static struct cpufreq_driver gx_suspmod_driver = {
	.verify		= cpufreq_gx_verify,
	.target		= cpufreq_gx_target,
	.init		= cpufreq_gx_cpu_init,
	.name		= "gx-suspmod",
	.owner		= THIS_MODULE,
};
/*
 * cpufreq_gx_init:
 *
 * Probe for a supported MediaGX/Geode companion chip, cache its
 * current power-management configuration, and register the cpufreq
 * driver.  Returns 0 on success, -ENODEV when no supported hardware is
 * found, -ENOMEM on allocation failure, or the error from
 * cpufreq_register_driver().
 */
/*static*/ int __init cpufreq_gx_init(void)
{
	int ret;
	struct gxfreq_params *params;
	struct pci_dev *gx_pci;
	u32 class_rev;

	/* Test if we have the right hardware */
	if ((gx_pci = gx_detect_chipset()) == NULL)
		return -ENODEV;

	/* check whether module parameters are sane */
	if (max_duration > 0xff)
		max_duration = 0xff;

	dprintk("geode suspend modulation available.\n");

	params = kmalloc(sizeof(struct gxfreq_params), GFP_KERNEL);
	if (params == NULL)
		return -ENOMEM;
	memset(params, 0, sizeof(struct gxfreq_params));

	params->cs55x0 = gx_pci;
	gx_params = params;

	/* keep cs55x0 configurations */
	pci_read_config_byte(params->cs55x0, PCI_SUSCFG, &(params->pci_suscfg));
	pci_read_config_byte(params->cs55x0, PCI_PMER1, &(params->pci_pmer1));
	pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2));
	pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration));
	pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration));
	pci_read_config_dword(params->cs55x0, PCI_CLASS_REVISION, &class_rev);

	/* BUGFIX: the revision ID is the low byte of the class/revision
	 * dword.  The original used the logical '&&', which yields 0 or 1
	 * and made every chip look like a pre-rev-0x10 CS5530, so PWRSVE
	 * was never enabled on CS5530A parts. */
	params->pci_rev = class_rev & 0xff;

	if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) {
		kfree(params);
		gx_params = NULL;	/* don't leave a dangling pointer */
		return ret;		/* register error! */
	}

	return 0;
}
/*
 * cpufreq_gx_exit:
 * unregister the driver and release the saved chipset parameters.
 */
/*static*/ void __exit cpufreq_gx_exit(void)
{
	cpufreq_unregister_driver(&gx_suspmod_driver);
	kfree(gx_params);
	gx_params = NULL;	/* guard against use-after-free via the global */
}
MODULE_AUTHOR ("Hiroshi Miura <miura@da-cha.org>"); |
MODULE_DESCRIPTION ("Cpufreq driver for Cyrix MediaGX and NatSemi Geode"); |
MODULE_LICENSE ("GPL"); |
module_init(cpufreq_gx_init); |
module_exit(cpufreq_gx_exit); |
/shark/trunk/drivers/cpu/cpufreq/speedstep-centrino.c |
---|
0,0 → 1,378 |
/* |
* cpufreq driver for Enhanced SpeedStep, as found in Intel's Pentium |
* M (part of the Centrino chipset). |
* |
* Despite the "SpeedStep" in the name, this is almost entirely unlike |
* traditional SpeedStep. |
* |
* Modelled on speedstep.c |
* |
* Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org> |
* |
* WARNING WARNING WARNING |
* |
* This driver manipulates the PERF_CTL MSR, which is only somewhat |
* documented. While it seems to work on my laptop, it has not been |
* tested anywhere else, and it may not work for you, do strange |
* things or simply crash. |
*/ |
#include <linuxcomp.h> |
#include <linux/kernel.h> |
#include <linux/module.h> |
#include <linux/init.h> |
#include <linux/cpufreq.h> |
#include <asm/msr.h> |
#include <asm/processor.h> |
#include <asm/cpufeature.h> |
#define PFX "speedstep-centrino: " |
#define MAINTAINER "Jeremy Fitzhardinge <jeremy@goop.org>" |
#define CENTRINO_DEBUG |
#ifdef CENTRINO_DEBUG |
#define dprintk(msg...) printk(msg) |
#else |
#define dprintk(msg...) do { } while(0) |
#endif |
/* One supported CPU: its exact marketing model string (matched against
 * x86_model_id), peak clock, and its frequency/voltage table. */
struct cpu_model
{
	const char	*model_name;	/* exact x86_model_id string to match */
	unsigned	max_freq;	/* max clock in kHz */
	struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */
};
/* Operating points for current CPU */ |
static const struct cpu_model *centrino_model; |
/* Computes the correct form for IA32_PERF_CTL MSR for a particular |
frequency/voltage operating point; frequency in MHz, volts in mV. |
This is stored as "index" in the structure. */ |
#define OP(mhz, mv) \ |
{ \ |
.frequency = (mhz) * 1000, \ |
.index = (((mhz)/100) << 8) | ((mv - 700) / 16) \ |
} |
/*
 * These voltage tables were derived from the Intel Pentium M
 * datasheet, document 25261202.pdf, Table 5. I have verified they
 * are consistent with my IBM ThinkPad X31, which has a 1.3GHz Pentium
 * M.
 *
 * Each OP(mhz, mv) entry is one frequency/voltage operating point;
 * every table ends with a CPUFREQ_TABLE_END sentinel entry.
 */

/* Ultra Low Voltage Intel Pentium M processor 900MHz */
static struct cpufreq_frequency_table op_900[] =
{
	OP(600,  844),
	OP(800,  988),
	OP(900, 1004),
	{ .frequency = CPUFREQ_TABLE_END }
};

/* Low Voltage Intel Pentium M processor 1.10GHz */
static struct cpufreq_frequency_table op_1100[] =
{
	OP( 600,  956),
	OP( 800, 1020),
	OP( 900, 1100),
	OP(1000, 1164),
	OP(1100, 1180),
	{ .frequency = CPUFREQ_TABLE_END }
};

/* Low Voltage Intel Pentium M processor 1.20GHz */
static struct cpufreq_frequency_table op_1200[] =
{
	OP( 600,  956),
	OP( 800, 1004),
	OP( 900, 1020),
	OP(1000, 1100),
	OP(1100, 1164),
	OP(1200, 1180),
	{ .frequency = CPUFREQ_TABLE_END }
};

/* Intel Pentium M processor 1.30GHz */
static struct cpufreq_frequency_table op_1300[] =
{
	OP( 600,  956),
	OP( 800, 1260),
	OP(1000, 1292),
	OP(1200, 1356),
	OP(1300, 1388),
	{ .frequency = CPUFREQ_TABLE_END }
};

/* Intel Pentium M processor 1.40GHz */
static struct cpufreq_frequency_table op_1400[] =
{
	OP( 600,  956),
	OP( 800, 1180),
	OP(1000, 1308),
	OP(1200, 1436),
	OP(1400, 1484),
	{ .frequency = CPUFREQ_TABLE_END }
};

/* Intel Pentium M processor 1.50GHz */
static struct cpufreq_frequency_table op_1500[] =
{
	OP( 600,  956),
	OP( 800, 1116),
	OP(1000, 1228),
	OP(1200, 1356),
	OP(1400, 1452),
	OP(1500, 1484),
	{ .frequency = CPUFREQ_TABLE_END }
};

/* Intel Pentium M processor 1.60GHz */
static struct cpufreq_frequency_table op_1600[] =
{
	OP( 600,  956),
	OP( 800, 1036),
	OP(1000, 1164),
	OP(1200, 1276),
	OP(1400, 1420),
	OP(1600, 1484),
	{ .frequency = CPUFREQ_TABLE_END }
};

/* Intel Pentium M processor 1.70GHz */
static struct cpufreq_frequency_table op_1700[] =
{
	OP( 600,  956),
	OP( 800, 1004),
	OP(1000, 1116),
	OP(1200, 1228),
	OP(1400, 1308),
	OP(1700, 1484),
	{ .frequency = CPUFREQ_TABLE_END }
};
#undef OP |
#define _CPU(max, name) \ |
{ "Intel(R) Pentium(R) M processor " name "MHz", (max)*1000, op_##max } |
#define CPU(max) _CPU(max, #max) |
/* CPU models, their operating frequency range, and freq/voltage
   operating points */
static const struct cpu_model models[] =
{
	_CPU( 900, " 900"),	/* 900MHz part's model string has a leading space */
	CPU(1100),
	CPU(1200),
	CPU(1300),
	CPU(1400),
	CPU(1500),
	CPU(1600),
	CPU(1700),
	{ 0, }			/* terminator: model_name == NULL */
};
#undef CPU |
/* Extract clock in kHz from a PERF_CTL/PERF_STATUS value: bits 15:8
 * hold the bus ratio, each unit worth 100 MHz (100000 kHz). */
static unsigned extract_clock(unsigned msr)
{
	unsigned ratio = (msr >> 8) & 0xffu;

	return ratio * 100000u;
}
/* Return the current CPU frequency in kHz */
static unsigned get_cur_freq(void)
{
	unsigned l, h;

	/* low word of IA32_PERF_STATUS encodes the current operating point */
	rdmsr(MSR_IA32_PERF_STATUS, l, h);
	return extract_clock(l);
}
static int centrino_cpu_init(struct cpufreq_policy *policy) |
{ |
unsigned freq; |
if (policy->cpu != 0 || centrino_model == NULL) |
return -ENODEV; |
freq = get_cur_freq(); |
policy->governor = 0; //!!!CPUFREQ_DEFAULT_GOVERNOR; |
policy->cpuinfo.transition_latency = 10; /* 10uS transition latency */ |
policy->cur = freq; |
dprintk(KERN_INFO PFX "centrino_cpu_init: policy=%d cur=%dkHz\n", |
policy->policy, policy->cur); |
return cpufreq_frequency_table_cpuinfo(policy, centrino_model->op_points); |
} |
/**
 * centrino_verify - verifies a new CPUFreq policy
 * @policy: new policy
 *
 * Limit must be within this model's frequency range, at least one
 * border included.
 */
static int centrino_verify (struct cpufreq_policy *policy)
{
	/* NOTE(review): assumes centrino_model is non-NULL; init fails
	 * first when it isn't -- confirm if call order ever changes. */
	return cpufreq_frequency_table_verify(policy, centrino_model->op_points);
}
/**
 * centrino_target - set a new CPUFreq policy
 * @policy: policy whose limits constrain the choice
 * @target_freq: desired frequency in kHz
 * @relation: CPUFREQ_RELATION_* rounding rule, passed through to the
 *            frequency-table helper
 *
 * Picks the operating point closest to @target_freq and programs it
 * into the IA32_PERF_CTL MSR.  Returns 0 on success, -ENODEV when no
 * model was detected, -EINVAL when no table entry fits.
 */
static int centrino_target (struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int newstate = 0;
	unsigned int msr, oldmsr, h;
	struct cpufreq_freqs freqs;

	if (centrino_model == NULL)
		return -ENODEV;

	if (cpufreq_frequency_table_target(policy, centrino_model->op_points, target_freq,
					   relation, &newstate))
		return -EINVAL;

	/* the table's "index" field holds the encoded PERF_CTL value */
	msr = centrino_model->op_points[newstate].index;
	rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);

	/* already at the requested operating point: nothing to do */
	if (msr == (oldmsr & 0xffff))
		return 0;

	/* Hm, old frequency can either be the last value we put in
	   PERF_CTL, or whatever it is now. The trouble is that TM2
	   can change it behind our back, which means we never get to
	   see the speed change. Reading back the current speed would
	   tell us something happened, but it may leave the things on
	   the notifier chain confused; we therefore stick to using
	   the last programmed speed rather than the current speed for
	   "old".

	   TODO: work out how the TCC interrupts work, and try to
	   catch the CPU changing things under us.
	*/
	freqs.cpu = 0;
	freqs.old = extract_clock(oldmsr);
	freqs.new = extract_clock(msr);

	dprintk(KERN_INFO PFX "target=%dkHz old=%d new=%d msr=%04x\n",
		target_freq, freqs.old, freqs.new, msr);

	//!!!cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	/* all but 16 LSB are "reserved", so treat them with
	   care */
	oldmsr &= ~0xffff;
	msr &= 0xffff;
	oldmsr |= msr;

	wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);

	//!!!cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return 0;
}
/* cpufreq driver callbacks for Enhanced SpeedStep */
static struct cpufreq_driver centrino_driver = {
	.name	= "centrino", /* should be speedstep-centrino,
				 but there's a 16 char limit */
	.init	= centrino_cpu_init,
	.verify = centrino_verify,
	.target = centrino_target,
	.owner	= THIS_MODULE,
};
/**
 * centrino_init - initializes the Enhanced SpeedStep CPUFreq driver
 *
 * Initializes the Enhanced SpeedStep support. Returns -ENODEV on
 * unsupported devices, -ENOENT if there's no voltage table for this
 * particular CPU model, -EINVAL on problems during initiatization,
 * and zero on success.
 *
 * This is quite picky. Not only does the CPU have to advertise the
 * "est" flag in the cpuid capability flags, we look for a specific
 * CPU model and stepping, and we need to have the exact model name in
 * our voltage tables. That is, be paranoid about not releasing
 * someone's valuable magic smoke.
 */
/*static*/ int __init centrino_init(void)
{
	struct cpuinfo_x86 *cpu = &new_cpu_data;
	const struct cpu_model *model;
	unsigned l, h;

	if (!cpu_has(cpu, X86_FEATURE_EST))
		return -ENODEV;

	/* Only Intel Pentium M stepping 5 for now - add new CPUs as
	   they appear after making sure they use PERF_CTL in the same
	   way. */
	if (cpu->x86_vendor != X86_VENDOR_INTEL ||
	    cpu->x86 != 6 ||
	    cpu->x86_model != 9 ||
	    cpu->x86_mask != 5) {
		printk(KERN_INFO PFX "found unsupported CPU with Enhanced SpeedStep: "
		       "send /proc/cpuinfo to " MAINTAINER "\n");
		return -ENODEV;
	}

	/* Check to see if Enhanced SpeedStep is enabled, and try to
	   enable it if not. */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);

	/* bit 16 of IA32_MISC_ENABLE gates Enhanced SpeedStep */
	if (!(l & (1<<16))) {
		l |= (1<<16);
		wrmsr(MSR_IA32_MISC_ENABLE, l, h);

		/* check to see if it stuck */
		rdmsr(MSR_IA32_MISC_ENABLE, l, h);
		if (!(l & (1<<16))) {
			printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n");
			return -ENODEV;
		}
	}

	/* match the exact marketing name against our voltage tables */
	for (model = models; model->model_name != NULL; model++)
		if (strcmp(cpu->x86_model_id, model->model_name) == 0)
			break;
	if (model->model_name == NULL) {
		printk(KERN_INFO PFX "no support for CPU model \"%s\": "
		       "send /proc/cpuinfo to " MAINTAINER "\n",
		       cpu->x86_model_id);
		return -ENOENT;
	}

	centrino_model = model;

	printk(KERN_INFO PFX "found \"%s\": max frequency: %dkHz\n",
	       model->model_name, model->max_freq);

	return cpufreq_register_driver(&centrino_driver);
}
/* Unregister the driver on module unload. */
/*static*/ void __exit centrino_exit(void)
{
	cpufreq_unregister_driver(&centrino_driver);
}
MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>"); |
MODULE_DESCRIPTION ("Enhanced SpeedStep driver for Intel Pentium M processors."); |
MODULE_LICENSE ("GPL"); |
module_init(centrino_init); |
module_exit(centrino_exit); |
/shark/trunk/drivers/cpu/cpufreq/p4-clockmod.c |
---|
0,0 → 1,282 |
/* |
* Pentium 4/Xeon CPU on demand clock modulation/speed scaling |
* (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> |
* (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com> |
* (C) 2002 Arjan van de Ven <arjanv@redhat.com> |
* (C) 2002 Tora T. Engstad |
* All Rights Reserved |
* |
* This program is free software; you can redistribute it and/or |
* modify it under the terms of the GNU General Public License |
* as published by the Free Software Foundation; either version |
* 2 of the License, or (at your option) any later version. |
* |
* The author(s) of this software shall not be held liable for damages |
* of any nature resulting due to the use of this software. This |
* software is provided AS-IS with no warranties. |
* |
* Date Errata Description |
* 20020525 N44, O17 12.5% or 25% DC causes lockup |
* |
*/ |
#include <linuxcomp.h> |
#include <linux/config.h> |
#include <linux/kernel.h> |
#include <linux/module.h> |
#include <linux/init.h> |
#include <linux/smp.h> |
#include <linux/cpufreq.h> |
#include <linux/slab.h> |
#include <linux/sched.h> |
#include <asm/processor.h> |
#include <asm/msr.h> |
#include <asm/timex.h> |
#define PFX "cpufreq: " |
/* |
* Duty Cycle (3bits), note DC_DISABLE is not specified in |
* intel docs i just use it to mean disable |
*/ |
enum { |
DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT, |
DC_64PT, DC_75PT, DC_88PT, DC_DISABLE |
}; |
#define DC_ENTRIES 8 |
static int has_N44_O17_errata[NR_CPUS]; |
static int stock_freq; |
/*
 * cpufreq_p4_setdc - program the on-demand clock-modulation duty cycle
 * @cpu:      CPU whose IA32_THERM_CONTROL MSR is to be changed
 * @newstate: DC_* duty-cycle state; DC_DISABLE turns modulation off
 *
 * Migrates the current task onto @cpu (and its HT sibling, if any) so
 * the per-CPU MSR can be accessed, then rewrites the duty-cycle field.
 * Returns 0 on success or -EINVAL for an offline CPU / invalid state.
 */
static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
{
	u32 l, h;
	cpumask_t cpus_allowed, affected_cpu_map;
	struct cpufreq_freqs freqs;
	int hyperthreading = 0;
	int sibling = 0;

	if (!cpu_online(cpu) || (newstate > DC_DISABLE) ||
	    (newstate == DC_RESV))
		return -EINVAL;

	/* switch to physical CPU where state is to be changed */
	cpus_allowed = current->cpus_allowed;

	/* only run on CPU to be set, or on its sibling */
	affected_cpu_map = cpumask_of_cpu(cpu);
#ifdef CONFIG_X86_HT
	hyperthreading = ((cpu_has_ht) && (smp_num_siblings == 2));
	if (hyperthreading) {
		sibling = cpu_sibling_map[cpu];
		cpu_set(sibling, affected_cpu_map);
	}
#endif
	set_cpus_allowed(current, affected_cpu_map);
	BUG_ON(!cpu_isset(smp_processor_id(), affected_cpu_map));

	/* get current state: bit 4 = modulation enabled, bits 3:1 = duty cycle */
	rdmsr(MSR_IA32_THERM_CONTROL, l, h);
	if (l & 0x10) {
		l = l >> 1;
		l &= 0x7;
	} else
		l = DC_DISABLE;

	if (l == newstate) {
		/* nothing to change -- restore the original affinity */
		set_cpus_allowed(current, cpus_allowed);
		return 0;
	} else if (l == DC_RESV) {
		printk(KERN_ERR PFX "BIG FAT WARNING: currently in invalid setting\n");
	}

	/* notifiers: each duty-cycle state runs at state/8 of stock frequency */
	freqs.old = stock_freq * l / 8;
	freqs.new = stock_freq * newstate / 8;
	freqs.cpu = cpu;
	//!!!cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	if (hyperthreading) {
		freqs.cpu = sibling;
		//!!!cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	rdmsr(MSR_IA32_THERM_STATUS, l, h);
#if 0
	if (l & 0x01)
		printk(KERN_DEBUG PFX "CPU#%d currently thermal throttled\n", cpu);
#endif

	/* errata N44/O17: 12.5%/25% duty cycle locks up -- bump to 37.5% */
	if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT))
		newstate = DC_38PT;

	rdmsr(MSR_IA32_THERM_CONTROL, l, h);
	if (newstate == DC_DISABLE) {
		/* printk(KERN_INFO PFX "CPU#%d disabling modulation\n", cpu); */
		wrmsr(MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
	} else {
		/* printk(KERN_INFO PFX "CPU#%d setting duty cycle to %d%%\n",
		   cpu, ((125 * newstate) / 10)); */
		/* bits 63 - 5	: reserved
		 * bit  4	: enable/disable
		 * bits 3-1	: duty cycle
		 * bit  0	: reserved
		 */
		l = (l & ~14);	/* 14 == 0b1110: clear the duty-cycle field */
		l = l | (1<<4) | ((newstate & 0x7)<<1);
		wrmsr(MSR_IA32_THERM_CONTROL, l, h);
	}

	set_cpus_allowed(current, cpus_allowed);

	/* notifiers */
	//!!!cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	if (hyperthreading) {
		freqs.cpu = cpu;
		//!!!cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}

	return 0;
}
/* One entry per duty-cycle state; .index is the DC_* state and the
 * frequencies are filled in as stock_freq * i / 8 at cpu_init time. */
static struct cpufreq_frequency_table p4clockmod_table[] = {
	{DC_RESV, CPUFREQ_ENTRY_INVALID},
	{DC_DFLT, 0},
	{DC_25PT, 0},
	{DC_38PT, 0},
	{DC_50PT, 0},
	{DC_64PT, 0},
	{DC_75PT, 0},
	{DC_88PT, 0},
	{DC_DISABLE, 0},
	{DC_RESV, CPUFREQ_TABLE_END},
};
static int cpufreq_p4_target(struct cpufreq_policy *policy, |
unsigned int target_freq, |
unsigned int relation) |
{ |
unsigned int newstate = DC_RESV; |
if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], target_freq, relation, &newstate)) |
return -EINVAL; |
cpufreq_p4_setdc(policy->cpu, p4clockmod_table[newstate].index); |
return 0; |
} |
/* Validate the policy limits against the duty-cycle frequency table. */
static int cpufreq_p4_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]);
}
static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) |
{ |
struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; |
int cpuid = 0; |
unsigned int i; |
/* Errata workaround */ |
cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask; |
switch (cpuid) { |
case 0x0f07: |
case 0x0f0a: |
case 0x0f11: |
case 0x0f12: |
has_N44_O17_errata[policy->cpu] = 1; |
} |
/* get frequency */ |
if (!stock_freq) { |
if (cpu_khz) |
stock_freq = cpu_khz; |
else { |
printk(KERN_INFO PFX "unknown core frequency - please use module parameter 'stock_freq'\n"); |
return -EINVAL; |
} |
} |
/* table init */ |
for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) { |
if ((i<2) && (has_N44_O17_errata[policy->cpu])) |
p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID; |
else |
p4clockmod_table[i].frequency = (stock_freq * i)/8; |
} |
cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu); |
/* cpuinfo and default policy values */ |
policy->governor = 0; //!!!CPUFREQ_DEFAULT_GOVERNOR; |
policy->cpuinfo.transition_latency = 1000; |
policy->cur = stock_freq; |
return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]); |
} |
/* Tear down per-CPU state: drop the sysfs frequency table, then switch
 * clock modulation off entirely. */
static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_put_attr(policy->cpu);
	return cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
}
/* sysfs attributes exported per policy (available frequencies list) */
static struct freq_attr* p4clockmod_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

/* cpufreq driver callbacks for P4/Xeon on-demand clock modulation */
static struct cpufreq_driver p4clockmod_driver = {
	.verify 	= cpufreq_p4_verify,
	.target 	= cpufreq_p4_target,
	.init		= cpufreq_p4_cpu_init,
	.exit		= cpufreq_p4_cpu_exit,
	.name		= "p4-clockmod",
	.owner		= THIS_MODULE,
	.attr		= p4clockmod_attr,
};
/*
 * cpufreq_p4_init:
 *
 * Register the driver when the boot CPU is an Intel part advertising
 * both the ACPI-processor and clock-modulation (ACC) capabilities.
 */
/*static*/ int __init cpufreq_p4_init(void)
{
	struct cpuinfo_x86 *c = cpu_data;

	/*
	 * THERM_CONTROL is architectural for IA32 now, so
	 * we can rely on the capability checks
	 */
	if (c->x86_vendor != X86_VENDOR_INTEL ||
	    !test_bit(X86_FEATURE_ACPI, c->x86_capability) ||
	    !test_bit(X86_FEATURE_ACC, c->x86_capability))
		return -ENODEV;

	printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");

	return cpufreq_register_driver(&p4clockmod_driver);
}
/* Unregister the driver on module unload. */
/*static*/ void __exit cpufreq_p4_exit(void)
{
	cpufreq_unregister_driver(&p4clockmod_driver);
}
MODULE_PARM(stock_freq, "i"); |
MODULE_AUTHOR ("Zwane Mwaikambo <zwane@commfireservices.com>"); |
MODULE_DESCRIPTION ("cpufreq driver for Pentium(TM) 4/Xeon(TM)"); |
MODULE_LICENSE ("GPL"); |
module_init(cpufreq_p4_init); |
module_exit(cpufreq_p4_exit); |
/shark/trunk/drivers/cpu/cpufreq/speedstep-ich.c |
---|
0,0 → 1,365 |
/* |
* (C) 2001 Dave Jones, Arjan van de ven. |
* (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> |
* |
* Licensed under the terms of the GNU GPL License version 2. |
* Based upon reverse engineered information, and on Intel documentation |
* for chipsets ICH2-M and ICH3-M. |
* |
* Many thanks to Ducrot Bruno for finding and fixing the last |
* "missing link" for ICH2-M/ICH3-M support, and to Thomas Winkler |
* for extensive testing. |
* |
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* |
*/ |
/********************************************************************* |
* SPEEDSTEP - DEFINITIONS * |
*********************************************************************/ |
#include <linuxcomp.h> |
#include <linux/kernel.h> |
#include <linux/module.h> |
#include <linux/init.h> |
#include <linux/cpufreq.h> |
#include <linux/pci.h> |
#include <linux/slab.h> |
#include "speedstep-lib.h" |
/* speedstep_chipset: |
* It is necessary to know which chipset is used. As accesses to |
* this device occur at various places in this module, we need a |
* static struct pci_dev * pointing to that device. |
*/ |
static struct pci_dev *speedstep_chipset_dev; |
/* speedstep_processor |
*/ |
static unsigned int speedstep_processor = 0; |
/*
 * There are only two frequency states for each processor. Values
 * are in kHz for the time being.
 */
/* index doubles as the state written to the chipset (0 = high, 1 = low);
 * frequencies are filled in elsewhere at probe time */
static struct cpufreq_frequency_table speedstep_freqs[] = {
	{SPEEDSTEP_HIGH, 	0},
	{SPEEDSTEP_LOW,		0},
	{0,			CPUFREQ_TABLE_END},
};
/* DEBUG |
* Define it if you want verbose debug output, e.g. for bug reporting |
*/ |
#define SPEEDSTEP_DEBUG |
#ifdef SPEEDSTEP_DEBUG |
#define dprintk(msg...) printk(msg) |
#else |
#define dprintk(msg...) do { } while(0) |
#endif |
/**
 * speedstep_set_state - set the SpeedStep state
 * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
 * @notify: whether to invoke the cpufreq transition notifiers
 *          (currently stubbed out)
 *
 * Tries to change the SpeedStep state through the chipset's power
 * management I/O block (PMBASE + 0x50).
 */
static void speedstep_set_state (unsigned int state, unsigned int notify)
{
	u32 pmbase;
	u8 pm2_blk;
	u8 value;
	unsigned long flags;
	struct cpufreq_freqs freqs;

	if (!speedstep_chipset_dev || (state > 0x1))
		return;

	freqs.old = speedstep_get_processor_frequency(speedstep_processor);
	freqs.new = speedstep_freqs[state].frequency;
	freqs.cpu = 0; /* speedstep.c is UP only driver */

	//!!!if (notify)
	//!!!	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	/* get PMBASE from chipset config space at offset 0x40; bit 0 set
	 * marks it as a valid I/O range */
	pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase);
	if (!(pmbase & 0x01))
	{
		printk(KERN_ERR "cpufreq: could not find speedstep register\n");
		return;
	}

	pmbase &= 0xFFFFFFFE;	/* strip the I/O-space indicator bit */
	if (!pmbase) {
		printk(KERN_ERR "cpufreq: could not find speedstep register\n");
		return;
	}

	/* Disable IRQs */
	local_irq_save(flags);

	/* read state: bit 0 of PMBASE+0x50 holds the current state */
	value = inb(pmbase + 0x50);

	dprintk(KERN_DEBUG "cpufreq: read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);

	/* write new state */
	value &= 0xFE;
	value |= state;

	dprintk(KERN_DEBUG "cpufreq: writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase);

	/* Disable bus master arbitration for the duration of the transition */
	pm2_blk = inb(pmbase + 0x20);
	pm2_blk |= 0x01;
	outb(pm2_blk, (pmbase + 0x20));

	/* Actual transition */
	outb(value, (pmbase + 0x50));

	/* Restore bus master arbitration */
	pm2_blk &= 0xfe;
	outb(pm2_blk, (pmbase + 0x20));

	/* check if transition was successful */
	value = inb(pmbase + 0x50);

	/* Enable IRQs */
	local_irq_restore(flags);

	dprintk(KERN_DEBUG "cpufreq: read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);

	if (state == (value & 0x1)) {
		dprintk (KERN_INFO "cpufreq: change to %u MHz succeeded\n", (speedstep_get_processor_frequency(speedstep_processor) / 1000));
	} else {
		printk (KERN_ERR "cpufreq: change failed - I/O error\n");
	}

	///!!!if (notify)
	//!!!	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return;
}
/** |
* speedstep_activate - activate SpeedStep control in the chipset |
* |
* Tries to activate the SpeedStep status and control registers. |
* Returns -EINVAL on an unsupported chipset, and zero on success. |
*/ |
static int speedstep_activate (void) |
{ |
u16 value = 0; |
if (!speedstep_chipset_dev) |
return -EINVAL; |
pci_read_config_word(speedstep_chipset_dev, |
0x00A0, &value); |
if (!(value & 0x08)) { |
value |= 0x08; |
dprintk(KERN_DEBUG "cpufreq: activating SpeedStep (TM) registers\n"); |
pci_write_config_word(speedstep_chipset_dev, |
0x00A0, value); |
} |
return 0; |
} |
/**
 * speedstep_detect_chipset - detect the Southbridge which contains SpeedStep logic
 *
 * Detects ICH2-M, ICH3-M and ICH4-M so far. The pci_dev points to
 * the LPC bridge / PM module which contains all power-management
 * functions. Returns the SPEEDSTEP_CHIPSET_-number for the detected
 * chipset (2/3/4 for ICH2-M/ICH3-M/ICH4-M), or zero on failure.
 * Side effect: stores the matched device in speedstep_chipset_dev.
 */
static unsigned int speedstep_detect_chipset (void)
{
	/* newest first: ICH4-M (82801DB) */
	speedstep_chipset_dev = pci_find_subsys(PCI_VENDOR_ID_INTEL,
			      PCI_DEVICE_ID_INTEL_82801DB_12,
			      PCI_ANY_ID,
			      PCI_ANY_ID,
			      NULL);
	if (speedstep_chipset_dev)
		return 4; /* 4-M */

	/* ICH3-M (82801CA) */
	speedstep_chipset_dev = pci_find_subsys(PCI_VENDOR_ID_INTEL,
			      PCI_DEVICE_ID_INTEL_82801CA_12,
			      PCI_ANY_ID,
			      PCI_ANY_ID,
			      NULL);
	if (speedstep_chipset_dev)
		return 3; /* 3-M */

	/* ICH2-M (82801BA) needs a host-bridge revision check */
	speedstep_chipset_dev = pci_find_subsys(PCI_VENDOR_ID_INTEL,
			      PCI_DEVICE_ID_INTEL_82801BA_10,
			      PCI_ANY_ID,
			      PCI_ANY_ID,
			      NULL);
	if (speedstep_chipset_dev) {
		/* speedstep.c causes lockups on Dell Inspirons 8000 and
		 * 8100 which use a pretty old revision of the 82815
		 * host brige. Abort on these systems.
		 */
		static struct pci_dev *hostbridge;
		u8 rev = 0;

		hostbridge = pci_find_subsys(PCI_VENDOR_ID_INTEL,
			      PCI_DEVICE_ID_INTEL_82815_MC,
			      PCI_ANY_ID,
			      PCI_ANY_ID,
			      NULL);

		if (!hostbridge)
			return 2; /* 2-M */

		pci_read_config_byte(hostbridge, PCI_REVISION_ID, &rev);
		if (rev < 5) {
			/* old 82815 revision: refuse to drive it */
			dprintk(KERN_INFO "cpufreq: hostbridge does not support speedstep\n");
			speedstep_chipset_dev = NULL;
			return 0;
		}

		return 2; /* 2-M */
	}

	return 0;
}
/** |
* speedstep_setpolicy - set a new CPUFreq policy |
* @policy: new policy |
* |
* Sets a new CPUFreq policy. |
*/ |
static int speedstep_target (struct cpufreq_policy *policy, |
unsigned int target_freq, |
unsigned int relation) |
{ |
unsigned int newstate = 0; |
if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) |
return -EINVAL; |
speedstep_set_state(newstate, 1); |
return 0; |
} |
/**
 * speedstep_verify - verifies a new CPUFreq policy
 * @policy: new policy
 *
 * Limit must be within speedstep_low_freq and speedstep_high_freq, with
 * at least one border included. Delegates to the generic frequency-table
 * helper over the two-entry speedstep_freqs table.
 */
static int speedstep_verify (struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
}
/* Per-CPU cpufreq init: detect the low/high frequencies, read the
 * current speed and populate the policy. UP-only driver, so any CPU
 * other than 0 is rejected with -ENODEV. Returns 0 on success,
 * -EIO when the current speed cannot be read, or the error from
 * speedstep_get_freqs(). */
static int speedstep_cpu_init(struct cpufreq_policy *policy)
{
	int result = 0;
	unsigned int speed;

	/* capability check */
	if (policy->cpu != 0)
		return -ENODEV;

	/* detect low and high frequency (also validates set_state works) */
	result = speedstep_get_freqs(speedstep_processor,
				     &speedstep_freqs[SPEEDSTEP_LOW].frequency,
				     &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
				     &speedstep_set_state);
	if (result)
		return result;

	/* get current speed setting */
	speed = speedstep_get_processor_frequency(speedstep_processor);
	if (!speed)
		return -EIO;

	dprintk(KERN_INFO "cpufreq: currently at %s speed setting - %i MHz\n",
		(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high",
		(speed / 1000));

	/* cpuinfo and default policy values */
	policy->governor = 0; //!!!CPUFREQ_DEFAULT_GOVERNOR;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;	/* latency unknown */
	policy->cur = speed;

	return cpufreq_frequency_table_cpuinfo(policy, &speedstep_freqs[0]);
}
/* cpufreq driver hooks for the ICH SpeedStep backend (UP only). */
static struct cpufreq_driver speedstep_driver = {
	.name	= "speedstep-ich",
	.verify	= speedstep_verify,
	.target	= speedstep_target,
	.init	= speedstep_cpu_init,
	.owner	= THIS_MODULE,
};
/** |
* speedstep_init - initializes the SpeedStep CPUFreq driver |
* |
* Initializes the SpeedStep support. Returns -ENODEV on unsupported |
* devices, -EINVAL on problems during initiatization, and zero on |
* success. |
*/ |
/*static*/ int __init speedstep_ich_init(void) |
{ |
/* detect processor */ |
speedstep_processor = speedstep_detect_processor(); |
if (!speedstep_processor) |
return -ENODEV; |
/* detect chipset */ |
if (!speedstep_detect_chipset()) { |
printk(KERN_INFO "cpufreq: Intel(R) SpeedStep(TM) for this chipset not (yet) available.\n"); |
return -ENODEV; |
} |
/* activate speedstep support */ |
if (speedstep_activate()) |
return -EINVAL; |
return cpufreq_register_driver(&speedstep_driver); |
} |
/**
 * speedstep_ich_exit - unregisters SpeedStep support
 *
 * Unregisters SpeedStep support by removing the cpufreq driver.
 */
/*static*/ void __exit speedstep_ich_exit(void)
{
	cpufreq_unregister_driver(&speedstep_driver);
}

MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>, Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION ("Speedstep driver for Intel mobile processors on chipsets with ICH-M southbridges.");
MODULE_LICENSE ("GPL");

module_init(speedstep_ich_init);
module_exit(speedstep_ich_exit);
/shark/trunk/drivers/cpu/shark/shark_cpu.c |
---|
0,0 → 1,212 |
/* |
* Project: S.Ha.R.K. |
* |
* Coordinators: |
* Giorgio Buttazzo <giorgio@sssup.it> |
* Paolo Gai <pj@gandalf.sssup.it> |
* |
* Authors : |
* Mauro Marinoni <mauro.marinoni@unipv.it> |
* |
* |
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy) |
* |
* http://www.sssup.it |
* http://retis.sssup.it |
* http://shark.sssup.it |
*/ |
#include <kernel/func.h> |
#include "../include/drivers/shark_cpu26.h" |
#define __CPU26_DEBUG__ |
/* CPU Initialization */ |
extern void early_cpu_init(void); |
extern void identify_cpu_0(void); |
extern void print_cpu_info_0(void); |
/* AMD K6 PowerNow */ |
extern int powernow_k6_init(void); |
extern void powernow_k6_exit(void); |
/* AMD K7 PowerNow */ |
extern int powernow_init(void); |
extern void powernow_exit(void); |
/* AMD K8 PowerNow */ |
extern int powernowk8_init(void); |
extern void powernowk8_exit(void); |
/* Cyrix MediaGX - NatSemi Geode */ |
extern int cpufreq_gx_init(void); |
extern void cpufreq_gx_exit(void); |
/* Pentium4 clock modulation/speed scaling */ |
extern int cpufreq_p4_init(void); |
extern void cpufreq_p4_exit(void); |
/* PentiumM/Centrino SpeedStep */ |
extern int centrino_init(void); |
extern void centrino_exit(void); |
/* Pentium ICH SpeedStep */ |
extern int speedstep_ich_init(void); |
extern void speedstep_ich_exit(void); |
/* Pentium SMI SpeedStep */ |
/*extern int speedstep_smi_init(void); |
extern void speedstep_smi_exit(void);*/ |
static int cpu_installed = FALSE;	/* TRUE once CPU26_init() has run */
static int dvs_installed = DVS_NONE;	/* which DVS backend is active, DVS_NONE if none */
/* Init the Linux CPU Layer */

/* Report whether CPU26_init() has completed (returns TRUE/FALSE). */
int CPU26_installed(void)
{
	return cpu_installed;
}
/* Print the identified CPU's vendor/model/stepping via the Linux layer. */
void CPU26_showinfo(void)
{
	print_cpu_info_0();
}
int CPU26_init(void) |
{ |
int ret = 0; |
if (cpu_installed == TRUE) return 0; |
early_cpu_init(); |
identify_cpu_0(); |
printk(KERN_INFO); |
print_cpu_info_0(); |
cpu_installed = TRUE; |
return ret; |
} |
int CPU26_close(void) |
{ |
if (cpu_installed == TRUE) { |
return 0; |
} else |
return -1; |
} |
/* Probe the dynamic-voltage-scaling backends in fixed order (K6, K7,
 * K8 PowerNow, MediaGX/Geode, P4 clock modulation, Centrino, ICH
 * SpeedStep) and install the first one whose init succeeds (returns 0).
 * Returns the DVS_* id of the installed backend, or -1 when the CPU
 * layer is not initialized or no backend matches. */
int CPU26_initDVS(void)
{
	int ret = 0;

	if (cpu_installed == FALSE)
		return -1;	/* CPU26_init() must run first */
	if (dvs_installed != DVS_NONE)
		return 0;	/* a backend is already active */

	ret = powernow_k6_init();
#ifdef __CPU26_DEBUG__
	printk(KERN_DEBUG "Check PowerNow! K6 - Returned: %d\n", ret);
#endif
	if (!ret) {
		dvs_installed = DVS_POWERNOW_K6;
		return dvs_installed;
	}

	ret = powernow_init();
#ifdef __CPU26_DEBUG__
	printk(KERN_DEBUG "Check PowerNow! K7 - Returned: %d\n", ret);
#endif
	if (!ret) {
		dvs_installed = DVS_POWERNOW_K7;
		return dvs_installed;
	}

	ret = powernowk8_init();
#ifdef __CPU26_DEBUG__
	printk(KERN_DEBUG "Check PowerNow! K8 - Returned: %d\n", ret);
#endif
	if (!ret) {
		dvs_installed = DVS_POWERNOW_K8;
		return dvs_installed;
	}

	ret = cpufreq_gx_init();
#ifdef __CPU26_DEBUG__
	printk(KERN_DEBUG "Check MediaGX/Geode (Returned: %d)\n", ret);
#endif
	if (!ret) {
		dvs_installed = DVS_MEDIAGX_GEODE;
		return dvs_installed;
	}

	ret = cpufreq_p4_init();
#ifdef __CPU26_DEBUG__
	printk(KERN_DEBUG "Check Pentium4 ClockModulation (Returned: %d)\n", ret);
#endif
	if (!ret) {
		dvs_installed = DVS_P4_CLOCK_MOD;
		return dvs_installed;
	}

	ret = centrino_init();
#ifdef __CPU26_DEBUG__
	printk(KERN_DEBUG "Check SpeedStep Centrino (Returned: %d)\n", ret);
#endif
	if (!ret) {
		dvs_installed = DVS_SS_CENTRINO;
		return dvs_installed;
	}

	ret = speedstep_ich_init();
#ifdef __CPU26_DEBUG__
	printk(KERN_DEBUG "Check SpeedStep ICH (Returned: %d)\n", ret);
#endif
	if (!ret) {
		dvs_installed = DVS_SS_ICH;
		return dvs_installed;
	}

	/* SMI SpeedStep support is not wired up yet */
	/*ret = speedstep_smi_init();
#ifdef __CPU26_DEBUG__
	printk(KERN_DEBUG "Check SpeedStep SMI (Returned: %d)\n", ret);
#endif
	if (!ret) {
		dvs_installed = DVS_SS_SMI;
		return dvs_installed;
	}*/

	return -1;	/* no backend matched this CPU */
}
int CPU26_closeDVS(void) |
{ |
switch(dvs_installed) { |
case DVS_NONE: |
return -1; |
case DVS_POWERNOW_K6: |
powernow_k6_exit(); |
return 0; |
case DVS_POWERNOW_K7: |
powernow_exit(); |
return 0; |
case DVS_POWERNOW_K8: |
powernowk8_exit(); |
return 0; |
case DVS_MEDIAGX_GEODE: |
cpufreq_gx_exit(); |
return 0; |
case DVS_P4_CLOCK_MOD: |
cpufreq_p4_exit(); |
return 0; |
case DVS_SS_CENTRINO: |
centrino_exit(); |
return 0; |
case DVS_SS_ICH: |
speedstep_ich_exit(); |
return 0; |
/*case DVS_SS_SMI: |
speedstep_smi_exit(); |
return 0;*/ |
} |
dvs_installed = DVS_NONE; |
return 0; |
} |
/shark/trunk/drivers/cpu/common.c |
---|
0,0 → 1,582 |
#include <linuxcomp.h> |
#include <linux/init.h> |
#include <linux/string.h> |
#include <linux/delay.h> |
#include <linux/smp.h> |
#include <asm/semaphore.h> |
#include <asm/processor.h> |
#include <asm/msr.h> |
#include <asm/io.h> |
#include <asm/mmu_context.h> |
#include "cpu.h" |
/* Added by Nino - Begin */
//#define __CPU_DEBUG__

/*!!! DEBUG */
/* CPU clock in kHz; placeholder until measured elsewhere. */
unsigned long cpu_khz = 1000000;
/*!!! DEBUG */

/* Nonzero forces 4MB pages (PSE) off during identification. */
int disable_pse __initdata = 0;

/*
 * Machine setup..
 */

/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };

unsigned long mmu_cr4_features;
int tsc_disable __initdata = 0; /* TODO */
/* Added by Nino - END */

/* -1 = use detected size; overridden by the "cachesize=" boot option. */
static int cachesize_override __initdata = -1;
static int disable_x86_fxsr __initdata = 0;	/* set by "nofxsr" */
static int disable_x86_serial_nr __initdata = 1;	/* PSN off unless "serialnumber" given */

/* Per-vendor handlers, registered by the <vendor>_init_cpu() calls
 * made from early_cpu_init(). */
struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};

extern void mcheck_init(struct cpuinfo_x86 *c);
extern int disable_pse;
static void default_init(struct cpuinfo_x86 * c) |
{ |
/* Not much we can do here... */ |
/* Check if at least it has cpuid */ |
if (c->cpuid_level == -1) { |
/* No cpuid. It must be an ancient CPU */ |
if (c->x86 == 4) |
strcpy(c->x86_model_id, "486"); |
else if (c->x86 == 3) |
strcpy(c->x86_model_id, "386"); |
} |
} |
/* Minimal handler used until get_cpu_vendor() selects a real one. */
static struct cpu_dev default_cpu = {
	.c_init	= default_init,
};

/* Currently selected vendor handler; never NULL. */
static struct cpu_dev * this_cpu = &default_cpu;
/* Parse the "cachesize=" boot option into cachesize_override (KB),
 * which later overrides the detected L2 size in display_cacheinfo(). */
static int __init cachesize_setup(char *str)
{
	get_option (&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);
/* Fetch the 48-byte CPU brand string via CPUID 0x80000002..4 into
 * c->x86_model_id and strip leading blanks.
 * Returns 0 when the extended leaves are unsupported, 1 on success. */
int __init get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	/* brand-string leaves present? */
	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	/* each leaf returns 16 bytes of the string in eax..edx */
	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;	/* force termination */

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while ( *p == ' ' )
		p++;
	if ( p != q ) {
		while ( *p )
			*q++ = *p++;	/* slide string over the blanks */
		while ( q <= &c->x86_model_id[48] )
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}
/* Read L1/L2 cache geometry from the AMD-style extended CPUID leaves
 * (0x80000005/6), print it, and record the size in c->x86_cache_size
 * (L2 if present, else combined L1). Honors cachesize_override and
 * the vendor's c_size_cache fixup hook. */
void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		/* L1: ecx = D-cache info, edx = I-cache info;
		 * bits 31..24 = size in KB, bits 7..0 = line size */
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size=(ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	// Some chips just has a large L1.
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;	/* L2 size in KB */

	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c,l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if ( l2size == 0 )
		return;		// Again, no L2 cache is possible

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		l2size, ecx & 0xFF);
}
/* Naming convention should be: <Name> [(<Codename>)] */ |
/* This table only is used unless init_<vendor>() below doesn't set it; */ |
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */ |
/* Look up CPU names by table lookup. */ |
static char __init *table_lookup_model(struct cpuinfo_x86 *c) |
{ |
struct cpu_model_info *info; |
if ( c->x86_model >= 16 ) |
return NULL; /* Range check */ |
if (!this_cpu) |
return NULL; |
info = this_cpu->c_models; |
while (info && info->family) { |
if (info->family == c->x86) |
return info->model_names[c->x86_model]; |
info++; |
} |
return NULL; /* Not found */ |
} |
/* Match c->x86_vendor_id against each registered cpu_dev's ident
 * strings; on the first hit, record the vendor index in c->x86_vendor
 * and select that handler as this_cpu. Leaves both unchanged on miss. */
void __init get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *id = c->x86_vendor_id;
	int n;

	for (n = 0; n < X86_VENDOR_NUM; n++) {
		struct cpu_dev *dev = cpu_devs[n];

		if (!dev)
			continue;	/* slot not registered */

		if (strcmp(id, dev->c_ident[0]) == 0 ||
		    (dev->c_ident[1] && strcmp(id, dev->c_ident[1]) == 0)) {
			c->x86_vendor = n;
			this_cpu = dev;
			break;
		}
	}
}
/* "nofxsr" boot option: disable FXSR/SSE during identify_cpu(). */
static int __init x86_fxsr_setup(char * s)
{
	disable_x86_fxsr = 1;
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);
/* Standard macro to see if a specific flag is changeable */
/* Toggle @flag in EFLAGS and read it back: returns nonzero if the CPU
 * allows the bit to change (used for the AC and ID feature probes).
 * EFLAGS is saved and restored around the test. */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	asm("pushfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"		/* f1 = original EFLAGS */
	    "movl %0,%1\n\t"
	    "xorl %2,%0\n\t"		/* flip the bit under test */
	    "pushl %0\n\t"
	    "popfl\n\t"			/* try to write it back */
	    "pushfl\n\t"
	    "popl %0\n\t"		/* f1 = EFLAGS after write */
	    "popfl\n\t"			/* restore original EFLAGS */
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}
/* Probe for the CPUID instruction */
/* CPUID exists iff the ID bit (bit 21) of EFLAGS is toggleable. */
int __init have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
/* Vendor-independent CPUID identification: fills in the vendor string,
 * family/model/stepping (with extended-family decoding for family 0xf),
 * the standard and AMD-extended capability words, and the brand string
 * when available. No-op on CPUs without CPUID. */
void __init generic_identify(struct cpuinfo_x86 * c)
{
	u32 tfms, xlvl;
	int junk;

	if (have_cpuid_p()) {
		/* Get vendor name: leaf 0 returns it in ebx:edx:ecx order */
		cpuid(0x00000000, &c->cpuid_level,
		      (int *)&c->x86_vendor_id[0],
		      (int *)&c->x86_vendor_id[8],
		      (int *)&c->x86_vendor_id[4]);

		get_cpu_vendor(c);
		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */

		/* Intel-defined flags: level 0x00000001 */
		if ( c->cpuid_level >= 0x00000001 ) {
			u32 capability, excap;
			cpuid(0x00000001, &tfms, &junk, &excap, &capability);
			c->x86_capability[0] = capability;	/* edx features */
			c->x86_capability[4] = excap;		/* ecx features */
			c->x86 = (tfms >> 8) & 15;
			c->x86_model = (tfms >> 4) & 15;
			if (c->x86 == 0xf) {
				/* family 15: add extended family/model fields */
				c->x86 += (tfms >> 20) & 0xff;
				c->x86_model += ((tfms >> 16) & 0xF) << 4;
			}
			c->x86_mask = tfms & 15;	/* stepping */
		} else {
			/* Have CPUID level 0 only - unheard of */
			c->x86 = 4;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
			if ( xlvl >= 0x80000001 )
				c->x86_capability[1] = cpuid_edx(0x80000001);
			if ( xlvl >= 0x80000004 )
				get_model_name(c); /* Default name */
		}
	}
}
/* Disable the Pentium III processor serial number (PSN) via
 * MSR_IA32_BBL_CR_CTL bit 21 unless "serialnumber" was given on the
 * command line, and drop the feature bit accordingly. */
static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
		/* Disable processor serial number */
		unsigned long lo,hi;
		rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
		lo |= 0x200000;		/* bit 21: PSN disable */
		wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_bit(X86_FEATURE_PN, c->x86_capability);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}
/* "serialnumber" boot option: keep the processor serial number enabled. */
static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
/*
 * This does the hard work of actually picking apart the CPU stuff...
 *
 * Resets *c to "unknown", runs the generic CPUID identification, then
 * the vendor hooks (c_identify / c_init), sanitizes the capability
 * bits for command-line overrides, fills in a model name, and finally
 * ANDs this CPU's features into boot_cpu_data on secondary CPUs.
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	/* start from a clean "unknown CPU" state */
	c->loops_per_jiffy = 1; //!!!loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/* First of all, decide if this is a 486 or higher */
		/* It's a 486 if we can modify the AC flag */
		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

#ifdef __CPU_DEBUG__
	printk(KERN_DEBUG "CPU: After generic identify, caps: %08lx %08lx %08lx %08lx\n",
	       c->x86_capability[0],
	       c->x86_capability[1],
	       c->x86_capability[2],
	       c->x86_capability[3]);
#endif

	/* optional extra identification step from the vendor handler */
	if (this_cpu->c_identify) {
		this_cpu->c_identify(c);

#ifdef __CPU_DEBUG__
		printk(KERN_DEBUG "CPU: After vendor identify, caps: %08lx %08lx %08lx %08lx\n",
		       c->x86_capability[0],
		       c->x86_capability[1],
		       c->x86_capability[2],
		       c->x86_capability[3]);
#endif
	}

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features. Now
	 * we do "generic changes."
	 */

	/* TSC disabled? */
	/*!!!if ( tsc_disable )
		clear_bit(X86_FEATURE_TSC, c->x86_capability);*/

	/* FXSR disabled? ("nofxsr" also implies no SSE) */
	if (disable_x86_fxsr) {
		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
		clear_bit(X86_FEATURE_XMM, c->x86_capability);
	}

	if (disable_pse)
		clear_bit(X86_FEATURE_PSE, c->x86_capability);

	/* If the model name is still unset, do table lookup. */
	if ( !c->x86_model_id[0] ) {
		char *p;
		p = table_lookup_model(c);
		if ( p )
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf26(c->x86_model_id, "%02x/%02x",
				  c->x86_vendor, c->x86_model);
	}

	/* Now the feature flags better reflect actual CPU features! */
#ifdef __CPU_DEBUG__
	printk(KERN_DEBUG "CPU: After all inits, caps: %08lx %08lx %08lx %08lx\n",
	       c->x86_capability[0],
	       c->x86_capability[1],
	       c->x86_capability[2],
	       c->x86_capability[3]);
#endif

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if ( c != &boot_cpu_data ) {
		// AND the already accumulated flags with these
		for ( i = 0 ; i < NCAPINTS ; i++ )
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
}
/*
 * Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
 *
 * Cyrix/NSC parts need their c_init fixups applied before the TSC is
 * used, so run them early once the vendor is known.
 */
void __init dodgy_tsc(void)
{
	get_cpu_vendor(&boot_cpu_data);

	if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
	    ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC   ))
		cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
}
/* Print "<vendor> <model> stepping NN" for @c on one line, skipping
 * the vendor prefix when the model string already begins with it and
 * falling back to "<family>86" when no model name is known. */
void __init print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else
		vendor = (c->cpuid_level >= 0) ? c->x86_vendor_id : NULL;

	/* only print the vendor when the model string doesn't start with it */
	if (vendor != NULL && strncmp(c->x86_model_id, vendor, strlen(vendor)) != 0)
		printk("%s ", vendor);

	if (c->x86_model_id[0])
		printk("%s", c->x86_model_id);
	else
		printk("%d86", c->x86);		/* e.g. "486" */

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}
/* Bitmask of CPUs that have completed cpu_init() (bit n = cpu n). */
unsigned long cpu_initialized __initdata = 0;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */

extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);
/* Register the vendor handlers linked into this build (Intel, Cyrix,
 * NSC, AMD) into cpu_devs[]; must run before identify_cpu(). */
void __init early_cpu_init(void)
{
	intel_cpu_init();
	cyrix_init_cpu();
	nsc_init_cpu();
	amd_init_cpu();

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* pse is not compatible with on-the-fly unmapping,
	 * disable it even if the cpus claim to support it.
	 */
	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
	disable_pse = 1;
#endif
}
/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 *
 * In this S.Ha.R.K. port most of the GDT/TSS/LDT handling is stubbed
 * out (the /!!!/ comment blocks); what remains is CR4 setup, optional
 * TSC disabling, flag/segment/debug-register clearing and FPU reset.
 */
void __init cpu_init (void)
{
	int cpu = smp_processor_id();
	/*!!!struct tss_struct * t = init_tss + cpu;
	struct thread_struct *thread = &current->thread;*/

	/* guard against double init; cpu_initialized is a per-CPU bitmask */
	if (test_and_set_bit(cpu, &cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();	/* park this CPU */
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	if (tsc_disable && cpu_has_tsc) {
		printk(KERN_NOTICE "Disabling TSC...\n");
		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		set_in_cr4(X86_CR4_TSD);	/* fault on user-mode RDTSC */
	}

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	/*!!!if (cpu) {
		memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
		cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
		cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
	}*/
	/*
	 * Set up the per-thread TLS descriptor cache:
	 */
	/*!!!memcpy(thread->tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);

	__asm__ __volatile__("lgdt %0" : : "m" (cpu_gdt_descr[cpu]));
	__asm__ __volatile__("lidt %0" : : "m" (idt_descr));*/

	/*
	 * Delete NT (nested-task flag in EFLAGS)
	 */
	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	/*!!!atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	load_esp0(t, thread->esp0);
	set_tss_desc(cpu,t);
	cpu_gdt_table[cpu][GDT_ENTRY_TSS].b &= 0xfffffdff;
	load_TR_desc();
	load_LDT(&init_mm.context);*/

	/* Set up doublefault TSS pointer in the GDT */
	/*!!!__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
	cpu_gdt_table[cpu][GDT_ENTRY_DOUBLEFAULT_TSS].b &= 0xfffffdff;*/

	/* Clear %fs and %gs. */
	asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");

	/* Clear all 6 debug registers: */

#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );

	CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);

#undef CD

	/*
	 * Force FPU initialization:
	 */
	current_thread_info()->status = 0;
	current->used_math = 0;
	stts();		/* set TS so the first FPU use traps and reinits */
}
/* Added by Nino - Begin */

/* Shark-facing wrapper: identify the boot CPU into new_cpu_data. */
void identify_cpu_0(void)
{
	identify_cpu(&new_cpu_data);
}
/* Shark-facing wrapper: print the boot CPU's identification line. */
void print_cpu_info_0(void)
{
	print_cpu_info(&new_cpu_data);
}
/* Added by Nino - End */
/shark/trunk/drivers/cpu/intel.c |
---|
0,0 → 1,421 |
#include <linuxcomp.h> |
#include <linux/init.h> |
#include <linux/kernel.h> |
#include <linux/string.h> |
#include <linux/bitops.h> |
#include <linux/smp.h> |
#include <linux/thread_info.h> |
#include <asm/processor.h> |
#include <asm/msr.h> |
#include <asm/uaccess.h> |
#include "cpu.h" |
/* Installs the IDT-based F0 0F workaround; defined in the trap code. */
extern int trap_init_f00f_bug(void);

#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask;
#endif
/* |
* Early probe support logic for ppro memory erratum #50 |
* |
* This is called before we do cpu ident work |
*/ |
int __init ppro_with_ram_bug(void) |
{ |
char vendor_id[16]; |
int ident; |
/* Must have CPUID */ |
if(!have_cpuid_p()) |
return 0; |
if(cpuid_eax(0)<1) |
return 0; |
/* Must be Intel */ |
cpuid(0, &ident, |
(int *)&vendor_id[0], |
(int *)&vendor_id[8], |
(int *)&vendor_id[4]); |
if(memcmp(vendor_id, "IntelInside", 12)) |
return 0; |
ident = cpuid_eax(1); |
/* Model 6 */ |
if(((ident>>8)&15)!=6) |
return 0; |
/* Pentium Pro */ |
if(((ident>>4)&15)!=1) |
return 0; |
if((ident&15) < 8) |
{ |
printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n"); |
return 1; |
} |
printk(KERN_INFO "Your Pentium Pro seems ok.\n"); |
return 0; |
} |
/* Cache classes used when decoding CPUID leaf 2 descriptors. */
#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

/* One CPUID-leaf-2 descriptor byte: its class and size (KB; for
 * LVL_TRACE the size is in K-uops). */
struct _cache_table
{
	unsigned char descriptor;
	char cache_type;
	short size;
};

/* all the cache descriptor types we care about (no TLB or trace cache entries) */
static struct _cache_table cache_table[] __initdata =
{
	{ 0x06, LVL_1_INST, 8 },
	{ 0x08, LVL_1_INST, 16 },
	{ 0x0a, LVL_1_DATA, 8 },
	{ 0x0c, LVL_1_DATA, 16 },
	{ 0x22, LVL_3,      512 },
	{ 0x23, LVL_3,      1024 },
	{ 0x25, LVL_3,      2048 },
	{ 0x29, LVL_3,      4096 },
	{ 0x2c, LVL_1_DATA, 32 },
	{ 0x30, LVL_1_INST, 32 },
	{ 0x39, LVL_2,      128 },
	{ 0x3b, LVL_2,      128 },
	{ 0x3c, LVL_2,      256 },
	{ 0x41, LVL_2,      128 },
	{ 0x42, LVL_2,      256 },
	{ 0x43, LVL_2,      512 },
	{ 0x44, LVL_2,      1024 },
	{ 0x45, LVL_2,      2048 },
	{ 0x66, LVL_1_DATA, 8 },
	{ 0x67, LVL_1_DATA, 16 },
	{ 0x68, LVL_1_DATA, 32 },
	{ 0x70, LVL_TRACE,  12 },
	{ 0x71, LVL_TRACE,  16 },
	{ 0x72, LVL_TRACE,  32 },
	{ 0x79, LVL_2,      128 },
	{ 0x7a, LVL_2,      256 },
	{ 0x7b, LVL_2,      512 },
	{ 0x7c, LVL_2,      1024 },
	{ 0x82, LVL_2,      256 },
	{ 0x83, LVL_2,      512 },
	{ 0x84, LVL_2,      1024 },
	{ 0x85, LVL_2,      2048 },
	{ 0x86, LVL_2,      512 },
	{ 0x87, LVL_2,      1024 },
	{ 0x00, 0, 0}		/* terminator */
};
/*
 * P4 Xeon errata 037 workaround.
 * Hardware prefetcher may cause stale data to be loaded into the cache.
 * Only applies to family 15, model 1, stepping 1 (C0 stepping).
 */
static void __init Intel_errata_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr (MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & (1<<9)) == 0) {
			printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= (1<<9);	/* Disable hw prefetching */
			wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}
}
/* Intel-specific c_init hook: F0 0F workaround, cache-descriptor
 * decoding (CPUID leaf 2), SEP erratum masking, PII/Celeron naming by
 * L2 size, optional Hyper-Threading sibling setup, P4 Xeon errata and
 * movsl alignment tuning, plus the synthetic P3/P4 feature bits. */
static void __init init_intel(struct cpuinfo_x86 *c)
{
	char *p = NULL;
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the system.
	 * Note that the workaround only should be initialized once...
	 */
	c->f00f_bug = 0;
	if ( c->x86 == 5 ) {
		static int f00f_workaround_enabled = 0;

		c->f00f_bug = 1;
		if ( !f00f_workaround_enabled ) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	//!!!select_idle_routine(c);

	if (c->cpuid_level > 1) {
		/* supports eax=2 call */
		int i, j, n;
		int regs[4];
		unsigned char *dp = (unsigned char *)regs;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for ( i = 0 ; i < n ; i++ ) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for ( j = 0 ; j < 3 ; j++ ) {
				if ( regs[j] < 0 ) regs[j] = 0;
			}

			/* Byte 0 is level count, not a descriptor */
			for ( j = 1 ; j < 16 ; j++ ) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0)
				{
					if (cache_table[k].descriptor == des) {
						/* accumulate sizes per cache class */
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}
						break;
					}
					k++;
				}
			}
		}

		if ( trace )
			printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
		else if ( l1i )
			printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
		if ( l1d )
			printk(", L1 D cache: %dK\n", l1d);
		if ( l2 )
			printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
		if ( l3 )
			printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

		/*
		 * This assumes the L3 cache is shared; it typically lives in
		 * the northbridge. The L1 caches are included by the L2
		 * cache, and so should not be included for the purpose of
		 * SMP switching weights.
		 */
		c->x86_cache_size = l2 ? l2 : (l1i+l1d);
	}

	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_bit(X86_FEATURE_SEP, c->x86_capability);

	/* Names for the Pentium II/Celeron processors
	   detectable only by also checking the cache size.
	   Dixon is NOT a Celeron. */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 5:
			if (c->x86_mask == 0) {
				if (l2 == 0)
					p = "Celeron (Covington)";
				else if (l2 == 256)
					p = "Mobile Pentium II (Dixon)";
			}
			break;
		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;
		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}
	}

	if ( p )
		strcpy(c->x86_model_id, p);

#ifdef CONFIG_X86_HT
	if (cpu_has(c, X86_FEATURE_HT)) {
		extern int phys_proc_id[NR_CPUS];

		u32 eax, ebx, ecx, edx;
		int cpu = smp_processor_id();

		cpuid(1, &eax, &ebx, &ecx, &edx);
		smp_num_siblings = (ebx & 0xff0000) >> 16;	/* logical CPUs per package */

		if (smp_num_siblings == 1) {
			printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
		} else if (smp_num_siblings > 1 ) {
			/*
			 * At this point we only support two siblings per
			 * processor package.
			 */
#define NR_SIBLINGS 2
			if (smp_num_siblings != NR_SIBLINGS) {
				printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
				smp_num_siblings = 1;
				goto too_many_siblings;
			}
			/* cpuid returns the value latched in the HW at reset,
			 * not the APIC ID register's value. For any box
			 * whose BIOS changes APIC IDs, like clustered APIC
			 * systems, we must use hard_smp_processor_id.
			 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
			 */
			phys_proc_id[cpu] = hard_smp_processor_id() & ~(smp_num_siblings - 1);

			printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
			       phys_proc_id[cpu]);
		}
	}
too_many_siblings:
#endif

	/* Work around errata */
	Intel_errata_workarounds(c);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	/* synthetic feature bits used elsewhere in the kernel */
	if (c->x86 == 15)
		set_bit(X86_FEATURE_P4, c->x86_capability);
	if (c->x86 == 6)
		set_bit(X86_FEATURE_P3, c->x86_capability);
}
static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size) |
{ |
/* Intel PIII Tualatin. This comes in two flavours. |
* One has 256kb of cache, the other 512. We have no way |
* to determine which, so we use a boottime override |
* for the 512kb model, and assume 256 otherwise. |
*/ |
if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0)) |
size = 256; |
return size; |
} |
/*
 * cpu_dev descriptor for Intel CPUs (see struct cpu_dev in cpu.h):
 * CPUID vendor string, per-family model-name tables, and the
 * init/identify/cache-size hooks used by the common CPU setup code.
 */
static struct cpu_dev intel_cpu_dev __initdata = {
	.c_vendor = "Intel",
	/* CPUID leaf 0 vendor identification string */
	.c_ident = { "GenuineIntel" },
	/*
	 * CPUID model number -> marketing name, one table per family
	 * (cpu.h sizes c_models[] at 4 entries; all 4 are used here).
	 * Missing indices are models with no known name.
	 */
	.c_models = {
		/* family 4: the 486 line */
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		/* family 5: classic Pentium */
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		/* family 6: P6 core (Pentium Pro through Pentium III) */
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		/* family 15: Pentium 4 */
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_init = init_intel,			/* Intel-specific setup */
	.c_identify = generic_identify,		/* shared CPUID-based identify */
	.c_size_cache = intel_size_cache,	/* Tualatin cache-size quirk */
};
__init int intel_cpu_init(void) |
{ |
cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev; |
return 0; |
} |
// arch_initcall(intel_cpu_init); |
/shark/trunk/drivers/cpu/makefile |
---|
0,0 → 1,26 |
# CPU support from linux 2.6.0
# Builds the "cpu" driver library for the Shark kernel tree.

# BASE is the root of the Shark tree; allow the caller to override it.
ifndef BASE
BASE=../..
endif

# Common build configuration shared by all Shark drivers.
include $(BASE)/config/config.mk

LIBRARY = cpu

OBJS_PATH = $(BASE)/drivers/cpu

# Vendor identification (common/amd/cyrix/intel), the cpufreq core and
# frequency-scaling drivers, and the Shark glue layer.
OBJS = common.o amd.o cyrix.o intel.o\
cpufreq/cpufreq.o cpufreq/freq_table.o\
cpufreq/powernow-k6.o cpufreq/powernow-k7.o cpufreq/powernow-k8.o\
cpufreq/gx-suspmod.o cpufreq/p4-clockmod.o\
cpufreq/speedstep-lib.o cpufreq/speedstep-centrino.o\
cpufreq/speedstep-ich.o\
shark/shark_cpu.o
# cpufreq/speedstep-smi.o

# Headers from the Linux 2.6 compatibility layer.
OTHERINCL += -I$(BASE)/drivers/linuxc26/include

# The imported Linux sources expect to be built as kernel code.
C_OPT += -D__KERNEL__

include $(BASE)/config/lib.mk
/shark/trunk/drivers/cpu/cpu.h |
---|
0,0 → 1,28 |
/*
 * Maps a CPUID (vendor, family) pair to a table of human-readable
 * model names indexed by the CPUID model number (0-15).
 */
struct cpu_model_info {
	int vendor;		/* X86_VENDOR_* code */
	int family;		/* CPUID family number */
	char *model_names[16];	/* indexed by CPUID model; unset entries are NULL */
};
/* attempt to consolidate cpu attributes */
struct cpu_dev {
	char * c_vendor;	/* human-readable vendor name, e.g. "Intel" */
	/* some have two possibilities for cpuid string */
	char * c_ident[2];
	/* per-family model-name tables (see struct cpu_model_info) */
	struct cpu_model_info c_models[4];
	/* vendor-specific setup hook */
	void (*c_init)(struct cpuinfo_x86 * c);
	/* vendor-specific identification hook */
	void (*c_identify)(struct cpuinfo_x86 * c);
	/* hook to correct a (possibly zero) reported cache size, in kB */
	unsigned int (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size);
};
/* Per-vendor descriptor table, indexed by X86_VENDOR_* codes. */
extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];

/* NOTE(review): presumably fills c->x86_model_id from the CPUID brand
 * string — confirm against the definition in common.c. */
extern int get_model_name(struct cpuinfo_x86 *c);
/* NOTE(review): appears to print/record cache sizes — confirm in common.c. */
extern void display_cacheinfo(struct cpuinfo_x86 *c);
/* Default c_identify implementation shared by the vendor drivers. */
extern void generic_identify(struct cpuinfo_x86 * c);
/* NOTE(review): presumably tests whether CPUID is available — confirm. */
extern int have_cpuid_p(void);