/*
 *	Pentium 4/Xeon CPU on demand clock modulation/speed scaling
 *	(C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *	(C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
 *	(C) 2002 Arjan van de Ven <arjanv@redhat.com>
 *	(C) 2002 Tora T. Engstad
 *	All Rights Reserved
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	The author(s) of this software shall not be held liable for damages
 *	of any nature resulting due to the use of this software. This
 *	software is provided AS-IS with no warranties.
 *
 *	Date		Errata		Description
 *	20020525	N44, O17	12.5% or 25% DC causes lockup
 *
 */

#include <linuxcomp.h>

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/timex.h>

#define PFX	"cpufreq: "

/*
 * Duty cycle (3 bits); note that DC_DISABLE is not specified in the
 * Intel docs, it is only used here to mean "modulation disabled".
 */
enum {
	DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT,
	DC_64PT, DC_75PT, DC_88PT, DC_DISABLE
};

#define DC_ENTRIES	8


static int has_N44_O17_errata[NR_CPUS];
static int stock_freq;

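/*
 * Set the on-demand clock modulation duty cycle for one CPU (and, with
 * hyperthreading, its sibling): temporarily bind the current task to the
 * affected CPU(s), read IA32_THERM_CONTROL, and if the requested state
 * differs, reprogram the duty-cycle bits, sending the cpufreq PRECHANGE
 * and POSTCHANGE notifications around the change.
 */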
static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
{
	u32 l, h;
	cpumask_t cpus_allowed, affected_cpu_map;
	struct cpufreq_freqs freqs;
	int hyperthreading = 0;
	int sibling = 0;

	if (!cpu_online(cpu) || (newstate > DC_DISABLE) ||
	    (newstate == DC_RESV))
		return -EINVAL;

	/* switch to physical CPU where state is to be changed */
	cpus_allowed = current->cpus_allowed;

	/* only run on CPU to be set, or on its sibling */
	affected_cpu_map = cpumask_of_cpu(cpu);
#ifdef CONFIG_X86_HT
	hyperthreading = ((cpu_has_ht) && (smp_num_siblings == 2));
	if (hyperthreading) {
		sibling = cpu_sibling_map[cpu];
		cpu_set(sibling, affected_cpu_map);
	}
#endif
	set_cpus_allowed(current, affected_cpu_map);
	BUG_ON(!cpu_isset(smp_processor_id(), affected_cpu_map));

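	/*
	 * THERM_CONTROL bit 4 enables software-controlled (on-demand)
	 * clock modulation; bits 3:1 hold the duty-cycle code matching
	 * the DC_* values above (see the bit-layout comment below).
	 */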
	/* get current state */
	rdmsr(MSR_IA32_THERM_CONTROL, l, h);
	if (l & 0x10) {
		l = l >> 1;
		l &= 0x7;
	} else
		l = DC_DISABLE;

	if (l == newstate) {
		set_cpus_allowed(current, cpus_allowed);
		return 0;
	} else if (l == DC_RESV) {
		printk(KERN_ERR PFX "BIG FAT WARNING: currently in invalid setting\n");
	}

	/* notifiers */
	freqs.old = stock_freq * l / 8;
	freqs.new = stock_freq * newstate / 8;
	freqs.cpu = cpu;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	if (hyperthreading) {
		freqs.cpu = sibling;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	rdmsr(MSR_IA32_THERM_STATUS, l, h);
#if 0
	if (l & 0x01)
		printk(KERN_DEBUG PFX "CPU#%d currently thermal throttled\n", cpu);
#endif
	if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT))
		newstate = DC_38PT;

	rdmsr(MSR_IA32_THERM_CONTROL, l, h);
	if (newstate == DC_DISABLE) {
		/* printk(KERN_INFO PFX "CPU#%d disabling modulation\n", cpu); */
		wrmsr(MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
	} else {
		/* printk(KERN_INFO PFX "CPU#%d setting duty cycle to %d%%\n",
			cpu, ((125 * newstate) / 10)); */
		/* bits 63 - 5	: reserved
		 * bit 4	: enable/disable
		 * bits 3-1	: duty cycle
		 * bit 0	: reserved
		 */
		l = (l & ~14);
		l = l | (1<<4) | ((newstate & 0x7)<<1);
		wrmsr(MSR_IA32_THERM_CONTROL, l, h);
	}

	set_cpus_allowed(current, cpus_allowed);

	/* notifiers */
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	if (hyperthreading) {
		freqs.cpu = cpu;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}

	return 0;
}

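/*
 * Duty-cycle states exposed to cpufreq: .index is the DC_* code passed to
 * cpufreq_p4_setdc(); .frequency is filled in at cpu_init time as
 * (stock_freq * i) / 8, i.e. 12.5% steps of the unmodulated clock.
 */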
static struct cpufreq_frequency_table p4clockmod_table[] = {
	{DC_RESV, CPUFREQ_ENTRY_INVALID},
	{DC_DFLT, 0},
	{DC_25PT, 0},
	{DC_38PT, 0},
	{DC_50PT, 0},
	{DC_64PT, 0},
	{DC_75PT, 0},
	{DC_88PT, 0},
	{DC_DISABLE, 0},
	{DC_RESV, CPUFREQ_TABLE_END},
};

static int cpufreq_p4_target(struct cpufreq_policy *policy,
			     unsigned int target_freq,
			     unsigned int relation)
{
	unsigned int newstate = DC_RESV;

	if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], target_freq, relation, &newstate))
		return -EINVAL;

	cpufreq_p4_setdc(policy->cpu, p4clockmod_table[newstate].index);

	return 0;
}


static int cpufreq_p4_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]);
}

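/*
 * Per-CPU setup: flag N44/O17 errata steppings, determine the stock
 * (unmodulated) frequency, fill in the duty-cycle frequency table and
 * set the default policy/cpuinfo values.
 */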
static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
{
	struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
	int cpuid = 0;
	unsigned int i;

	/*
	 * Errata workaround: cpuid packs family (bits 11-8), model (7-4)
	 * and stepping (3-0); the values below are the early Pentium 4
	 * steppings affected by the N44/O17 erratum noted in the header
	 * (12.5% or 25% duty cycle causes a lockup).
	 */
	cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
	switch (cpuid) {
	case 0x0f07:
	case 0x0f0a:
	case 0x0f11:
	case 0x0f12:
		has_N44_O17_errata[policy->cpu] = 1;
	}

	/* get frequency */
	if (!stock_freq) {
		if (cpu_khz)
			stock_freq = cpu_khz;
		else {
			printk(KERN_INFO PFX "unknown core frequency - please use module parameter 'stock_freq'\n");
			return -EINVAL;
		}
	}

	/* table init */
	for (i = 1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
		if ((i < 2) && (has_N44_O17_errata[policy->cpu]))
			p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
		else
			p4clockmod_table[i].frequency = (stock_freq * i)/8;
	}
	cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);

	/* cpuinfo and default policy values */
	policy->governor = 0; //!!!CPUFREQ_DEFAULT_GOVERNOR;
	policy->cpuinfo.transition_latency = 1000;
	policy->cur = stock_freq;

	return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
}


static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_put_attr(policy->cpu);
	return cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
}

static struct freq_attr* p4clockmod_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver p4clockmod_driver = {
	.verify		= cpufreq_p4_verify,
	.target		= cpufreq_p4_target,
	.init		= cpufreq_p4_cpu_init,
	.exit		= cpufreq_p4_cpu_exit,
	.name		= "p4-clockmod",
	.owner		= THIS_MODULE,
	.attr		= p4clockmod_attr,
};


/*static*/ int __init cpufreq_p4_init(void)
{
	struct cpuinfo_x86 *c = cpu_data;

	/*
	 * THERM_CONTROL is architectural for IA32 now, so
	 * we can rely on the capability checks
	 */
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	if (!test_bit(X86_FEATURE_ACPI, c->x86_capability) ||
	    !test_bit(X86_FEATURE_ACC, c->x86_capability))
		return -ENODEV;

	printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");

	return cpufreq_register_driver(&p4clockmod_driver);
}


/*static*/ void __exit cpufreq_p4_exit(void)
{
	cpufreq_unregister_driver(&p4clockmod_driver);
}


MODULE_PARM(stock_freq, "i");

MODULE_AUTHOR ("Zwane Mwaikambo <zwane@commfireservices.com>");
MODULE_DESCRIPTION ("cpufreq driver for Pentium(TM) 4/Xeon(TM)");
MODULE_LICENSE ("GPL");

module_init(cpufreq_p4_init);
module_exit(cpufreq_p4_exit);