ia64/linux-2.6.18-xen.hg

drivers/acpi/processor_throttling.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation, nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. we receive fewer pages than we asked for), we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author   Keir Fraser <keir.fraser@citrix.com>
date     Fri Jun 05 14:01:20 2009 +0100
parents  e1e8cc97331d
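As a rough illustration of the approach described above, a minimal sketch only, not the changeset itself: balloon_process, balloon_timer, current_pages and balloon_target are modelled loosely on the real balloon driver, and increase_reservation/decrease_reservation are assumed here to be helpers that return the number of pages actually transferred.

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list balloon_timer;	/* re-arms balloon_process() */
static unsigned long current_pages;	/* pages currently held */
static unsigned long balloon_target;	/* pages we want to hold */

/* Assumed helpers: return the number of pages actually transferred. */
static unsigned long increase_reservation(unsigned long nr_pages);
static unsigned long decrease_reservation(unsigned long nr_pages);

static void balloon_process(unsigned long unused)
{
	long credit = balloon_target - current_pages;

	if (credit > 0)
		/* Keep whatever Xen grants, even on partial success. */
		current_pages += increase_reservation(credit);
	else if (credit < 0)
		current_pages -= decrease_reservation(-credit);

	/*
	 * Short of the target (e.g. transient host memory pressure)?
	 * Retry on a timer instead of recording a "hard limit".
	 */
	if (current_pages != balloon_target)
		mod_timer(&balloon_timer, jiffies + HZ);
}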
/*
 * processor_throttling.c - Throttling submodule of the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                    - Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT                      ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");

struct throttling_tstate {
        unsigned int cpu;       /* cpu nr */
        int target_state;       /* target T-state */
};

#define THROTTLING_PRECHANGE    (1)
#define THROTTLING_POSTCHANGE   (2)

static int acpi_processor_get_throttling(struct acpi_processor *pr);
int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
static int acpi_processor_update_tsd_coord(void)
{
        int count, count_target;
        int retval = 0;
        unsigned int i, j;
        cpumask_t covered_cpus;
        struct acpi_processor *pr, *match_pr;
        struct acpi_tsd_package *pdomain, *match_pdomain;
        struct acpi_processor_throttling *pthrottling, *match_pthrottling;

        /*
         * Now that we have _TSD data from all CPUs, let's set up T-state
         * coordination between all CPUs.
         */
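        /*
         * Overview (added comment): the first pass below checks that every
         * CPU has a valid _TSD package; the second pass groups CPUs whose
         * _TSD domains match and gives them a common shared_cpu_map.
         */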
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr)
                        continue;

                /* Basic validity check for domain info */
                pthrottling = &(pr->throttling);

                /*
                 * If the tsd package for one CPU is invalid, the coordination
                 * among all CPUs is treated as invalid.
                 * Maybe it is ugly.
                 */
                if (!pthrottling->tsd_valid_flag) {
                        retval = -EINVAL;
                        break;
                }
        }
        if (retval)
                goto err_ret;

        cpus_clear(covered_cpus);
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr)
                        continue;

                if (cpu_isset(i, covered_cpus))
                        continue;
                pthrottling = &pr->throttling;

                pdomain = &(pthrottling->domain_info);
                cpu_set(i, pthrottling->shared_cpu_map);
                cpu_set(i, covered_cpus);
                /*
                 * If the number of processors in the TSD domain is 1, it is
                 * unnecessary to parse the coordination for this CPU.
                 */
                if (pdomain->num_processors <= 1)
                        continue;

                /* Validate the Domain info */
                count_target = pdomain->num_processors;
                count = 1;

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = processors[j];
                        if (!match_pr)
                                continue;

                        match_pthrottling = &(match_pr->throttling);
                        match_pdomain = &(match_pthrottling->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        /* Here i and j are in the same domain.
                         * If two TSD packages have the same domain, they
                         * should have the same num_processors and
                         * coordination type. Otherwise it will be regarded
                         * as illegal.
                         */
                        if (match_pdomain->num_processors != count_target) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        if (pdomain->coord_type != match_pdomain->coord_type) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        cpu_set(j, covered_cpus);
                        cpu_set(j, pthrottling->shared_cpu_map);
                        count++;
                }
                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = processors[j];
                        if (!match_pr)
                                continue;

                        match_pthrottling = &(match_pr->throttling);
                        match_pdomain = &(match_pthrottling->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        /*
                         * If some CPUs have the same domain, they
                         * will have the same shared_cpu_map.
                         */
                        match_pthrottling->shared_cpu_map =
                                pthrottling->shared_cpu_map;
                }
        }

err_ret:
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr)
                        continue;

                /*
                 * Assume no coordination on any error parsing domain info.
                 * The coordination type will be forced to SW_ALL.
                 */
                if (retval) {
                        pthrottling = &(pr->throttling);
                        cpus_clear(pthrottling->shared_cpu_map);
                        cpu_set(i, pthrottling->shared_cpu_map);
                        pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
                }
        }

        return retval;
}
/*
 * Update the T-state coordination after the _TSD
 * data for all cpus is obtained.
 */
void acpi_processor_throttling_init(void)
{
        if (acpi_processor_update_tsd_coord())
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "Assume no T-state coordination\n"));

        return;
}
static int acpi_processor_throttling_notifier(unsigned long event, void *data)
{
        struct throttling_tstate *p_tstate = data;
        struct acpi_processor *pr;
        unsigned int cpu;
        int target_state;
        struct acpi_processor_limit *p_limit;
        struct acpi_processor_throttling *p_throttling;

        cpu = p_tstate->cpu;
        pr = processors[cpu];
        if (!pr) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
                return 0;
        }
        if (!pr->flags.throttling) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
                                "unsupported on CPU %d\n", cpu));
                return 0;
        }
        target_state = p_tstate->target_state;
        p_throttling = &(pr->throttling);
        switch (event) {
        case THROTTLING_PRECHANGE:
                /*
                 * The prechange event is used to choose one proper T-state,
                 * which meets the limits of thermal, user and _TPC.
                 */
                p_limit = &pr->limit;
                if (p_limit->thermal.tx > target_state)
                        target_state = p_limit->thermal.tx;
                if (p_limit->user.tx > target_state)
                        target_state = p_limit->user.tx;
                if (pr->throttling_platform_limit > target_state)
                        target_state = pr->throttling_platform_limit;
                if (target_state >= p_throttling->state_count) {
                        printk(KERN_WARNING
                                "Exceeded the T-state limit\n");
                        target_state = p_throttling->state_count - 1;
                }
                p_tstate->target_state = target_state;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event: "
                                "target T-state of CPU %d is T%d\n",
                                cpu, target_state));
                break;
        case THROTTLING_POSTCHANGE:
                /*
                 * The postchange event is only used to update the
                 * T-state flag of acpi_processor_throttling.
                 */
                p_throttling->state = target_state;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event: "
                                "CPU %d is switched to T%d\n",
                                cpu, target_state));
                break;
        default:
                printk(KERN_WARNING
                        "Unsupported Throttling notifier event\n");
                break;
        }

        return 0;
}
/*
 * _TPC - Throttling Present Capabilities
 */
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
        acpi_status status = 0;
        unsigned long tpc = 0;

        if (!pr)
                return -EINVAL;
        status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
                }
                return -ENODEV;
        }
        pr->throttling_platform_limit = (int)tpc;
        return 0;
}
int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
{
        int result = 0;
        int throttling_limit;
        int current_state;
        struct acpi_processor_limit *limit;
        int target_state;

        result = acpi_processor_get_platform_limit(pr);
        if (result) {
                /* Throttling Limit is unsupported */
                return result;
        }

        throttling_limit = pr->throttling_platform_limit;
        if (throttling_limit >= pr->throttling.state_count) {
                /* Incorrect Throttling Limit */
                return -EINVAL;
        }

        current_state = pr->throttling.state;
        if (current_state > throttling_limit) {
                /*
                 * The current state can meet the requirement of
                 * the _TPC limit. But it is reasonable that OSPM changes
                 * T-states from high to low for better performance.
                 * Of course the limit conditions of thermal
                 * and user should be considered.
                 */
                limit = &pr->limit;
                target_state = throttling_limit;
                if (limit->thermal.tx > target_state)
                        target_state = limit->thermal.tx;
                if (limit->user.tx > target_state)
                        target_state = limit->user.tx;
        } else if (current_state == throttling_limit) {
                /*
                 * Unnecessary to change the throttling state
                 */
                return 0;
        } else {
                /*
                 * If the current state is lower than the limit of _TPC, it
                 * will be forced to switch to the throttling state defined
                 * by throttling_platform_limit.
                 * Because the previous state meets the limit conditions
                 * of thermal and user, it is unnecessary to check it again.
                 */
                target_state = throttling_limit;
        }
        return acpi_processor_set_throttling(pr, target_state);
}
/*
 * _PTC - Processor Throttling Control (and status) register location
 */
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = 0;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *ptc = NULL;
        union acpi_object obj = { 0 };
        struct acpi_processor_throttling *throttling;

        status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
                }
                return -ENODEV;
        }

        ptc = (union acpi_object *)buffer.pointer;
        if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
            || (ptc->package.count != 2)) {
                printk(KERN_ERR PREFIX "Invalid _PTC data\n");
                result = -EFAULT;
                goto end;
        }

        /*
         * control_register
         */

        obj = ptc->package.elements[0];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_ptc_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX
                       "Invalid _PTC data (control_register)\n");
                result = -EFAULT;
                goto end;
        }
        memcpy(&pr->throttling.control_register, obj.buffer.pointer,
               sizeof(struct acpi_ptc_register));

        /*
         * status_register
         */

        obj = ptc->package.elements[1];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_ptc_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
                result = -EFAULT;
                goto end;
        }

        memcpy(&pr->throttling.status_register, obj.buffer.pointer,
               sizeof(struct acpi_ptc_register));

        throttling = &pr->throttling;

        if ((throttling->control_register.bit_width +
             throttling->control_register.bit_offset) > 32) {
                printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
                result = -EFAULT;
                goto end;
        }

        if ((throttling->status_register.bit_width +
             throttling->status_register.bit_offset) > 32) {
                printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
                result = -EFAULT;
                goto end;
        }

end:
        kfree(buffer.pointer);

        return result;
}
/*
 * _TSS - Throttling Supported States
 */
static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *tss = NULL;
        int i;

        status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
                }
                return -ENODEV;
        }

        tss = buffer.pointer;
        if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
                printk(KERN_ERR PREFIX "Invalid _TSS data\n");
                result = -EFAULT;
                goto end;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
                          tss->package.count));

        pr->throttling.state_count = tss->package.count;
        pr->throttling.states_tss =
            kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
                    GFP_KERNEL);
        if (!pr->throttling.states_tss) {
                result = -ENOMEM;
                goto end;
        }

        for (i = 0; i < pr->throttling.state_count; i++) {

                struct acpi_processor_tx_tss *tx =
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
                                                      states_tss[i]);

                state.length = sizeof(struct acpi_processor_tx_tss);
                state.pointer = tx;

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

                status = acpi_extract_package(&(tss->package.elements[i]),
                                              &format, &state);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
                        result = -EFAULT;
                        kfree(pr->throttling.states_tss);
                        goto end;
                }

                if (!tx->freqpercentage) {
                        printk(KERN_ERR PREFIX
                               "Invalid _TSS data: freq is zero\n");
                        result = -EFAULT;
                        kfree(pr->throttling.states_tss);
                        goto end;
                }
        }

end:
        kfree(buffer.pointer);

        return result;
}
/*
 * _TSD - T-State Dependencies
 */
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *tsd = NULL;
        struct acpi_tsd_package *pdomain;
        struct acpi_processor_throttling *pthrottling;

        pthrottling = &pr->throttling;
        pthrottling->tsd_valid_flag = 0;

        status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
                }
                return -ENODEV;
        }

        tsd = buffer.pointer;
        if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (tsd->package.count != 1) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
                result = -EFAULT;
                goto end;
        }

        pdomain = &(pr->throttling.domain_info);

        state.length = sizeof(struct acpi_tsd_package);
        state.pointer = pdomain;

        status = acpi_extract_package(&(tsd->package.elements[0]),
                                      &format, &state);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:num_entries\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:revision\n"));
                result = -EFAULT;
                goto end;
        }

        pthrottling = &pr->throttling;
        pthrottling->tsd_valid_flag = 1;
        pthrottling->shared_type = pdomain->coord_type;
        cpu_set(pr->id, pthrottling->shared_cpu_map);
        /*
         * If the coordination type is not defined in the ACPI spec,
         * tsd_valid_flag will be cleared and the coordination type
         * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
         */
        if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
            pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
            pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
                pthrottling->tsd_valid_flag = 0;
                pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
        }

end:
        kfree(buffer.pointer);
        return result;
}
/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
        int state = 0;
        u32 value = 0;
        u32 duty_mask = 0;
        u32 duty_value = 0;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        pr->throttling.state = 0;

        duty_mask = pr->throttling.state_count - 1;

        duty_mask <<= pr->throttling.duty_offset;

        local_irq_disable();

        value = inl(pr->throttling.address);

        /*
         * Compute the current throttling state when throttling is enabled
         * (bit 4 is on).
         */
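        /*
         * Illustrative example (added comment, not in the original source):
         * with duty_width = 3 there are 8 T-states. A duty_value of 6 means
         * the clock runs 6/8 of the time, which corresponds to state
         * 8 - 6 = T2 below.
         */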
        if (value & 0x10) {
                duty_value = value & duty_mask;
                duty_value >>= pr->throttling.duty_offset;

                if (duty_value)
                        state = pr->throttling.state_count - duty_value;
        }

        pr->throttling.state = state;

        local_irq_enable();

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Throttling state is T%d (%d%% throttling applied)\n",
                          state, pr->throttling.states[state].performance));

        return 0;
}
#ifdef CONFIG_X86
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
                                 acpi_integer *value)
{
        struct cpuinfo_x86 *c;
        u64 msr_high, msr_low;
        unsigned int cpu;
        u64 msr = 0;
        int ret = -1;

        cpu = pr->id;
        c = &cpu_data[cpu];

        if ((c->x86_vendor != X86_VENDOR_INTEL) ||
            !cpu_has(c, X86_FEATURE_ACPI)) {
                printk(KERN_ERR PREFIX
                       "HARDWARE addr space, NOT supported yet\n");
        } else {
                msr_low = 0;
                msr_high = 0;
                rdmsr_safe(MSR_IA32_THERM_CONTROL,
                           (u32 *)&msr_low, (u32 *)&msr_high);
                msr = (msr_high << 32) | msr_low;
                *value = (acpi_integer)msr;
                ret = 0;
        }
        return ret;
}

static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
{
        struct cpuinfo_x86 *c;
        unsigned int cpu;
        int ret = -1;
        u64 msr;

        cpu = pr->id;
        c = &cpu_data[cpu];

        if ((c->x86_vendor != X86_VENDOR_INTEL) ||
            !cpu_has(c, X86_FEATURE_ACPI)) {
                printk(KERN_ERR PREFIX
                       "HARDWARE addr space, NOT supported yet\n");
        } else {
                msr = value;
                wrmsr_safe(MSR_IA32_THERM_CONTROL,
                           (u32)msr, (u32)(msr >> 32));
                ret = 0;
        }
        return ret;
}
#else
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
                                 acpi_integer *value)
{
        printk(KERN_ERR PREFIX
               "HARDWARE addr space, NOT supported yet\n");
        return -1;
}

static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
{
        printk(KERN_ERR PREFIX
               "HARDWARE addr space, NOT supported yet\n");
        return -1;
}
#endif
static int acpi_read_throttling_status(struct acpi_processor *pr,
                                       acpi_integer *value)
{
        u32 bit_width, bit_offset;
        u64 ptc_value;
        u64 ptc_mask;
        struct acpi_processor_throttling *throttling;
        int ret = -1;

        throttling = &pr->throttling;
        switch (throttling->status_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
                ptc_value = 0;
                bit_width = throttling->status_register.bit_width;
                bit_offset = throttling->status_register.bit_offset;

                acpi_os_read_port((acpi_io_address) throttling->status_register.
                                  address, (u32 *) &ptc_value,
                                  (u32) (bit_width + bit_offset));
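                /*
                 * Illustrative example (added comment, not in the original
                 * source): bit_offset = 1 and bit_width = 4 read 5 bits from
                 * the port and extract bits 4:1 with the mask 0xf below.
                 */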
                ptc_mask = (1 << bit_width) - 1;
                *value = (acpi_integer) ((ptc_value >> bit_offset) & ptc_mask);
                ret = 0;
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                ret = acpi_throttling_rdmsr(pr, value);
                break;
        default:
                printk(KERN_ERR PREFIX "Unknown addr space %d\n",
                       (u32) (throttling->status_register.space_id));
        }
        return ret;
}
static int acpi_write_throttling_state(struct acpi_processor *pr,
                                       acpi_integer value)
{
        u32 bit_width, bit_offset;
        u64 ptc_value;
        u64 ptc_mask;
        struct acpi_processor_throttling *throttling;
        int ret = -1;

        throttling = &pr->throttling;
        switch (throttling->control_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
                bit_width = throttling->control_register.bit_width;
                bit_offset = throttling->control_register.bit_offset;
                ptc_mask = (1 << bit_width) - 1;
                ptc_value = value & ptc_mask;

                acpi_os_write_port((acpi_io_address) throttling->
                                   control_register.address,
                                   (u32) (ptc_value << bit_offset),
                                   (u32) (bit_width + bit_offset));
                ret = 0;
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                ret = acpi_throttling_wrmsr(pr, value);
                break;
        default:
                printk(KERN_ERR PREFIX "Unknown addr space %d\n",
                       (u32) (throttling->control_register.space_id));
        }
        return ret;
}
static int acpi_get_throttling_state(struct acpi_processor *pr,
                                     acpi_integer value)
{
        int i;

        for (i = 0; i < pr->throttling.state_count; i++) {
                struct acpi_processor_tx_tss *tx =
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
                                                      states_tss[i]);
                if (tx->control == value)
                        break;
        }
        /* i == state_count here means no matching control value was found */
        if (i >= pr->throttling.state_count)
                i = -1;
        return i;
}
static int acpi_get_throttling_value(struct acpi_processor *pr,
                                     int state, acpi_integer *value)
{
        int ret = -1;

        /* Valid states are 0 .. state_count - 1 */
        if (state >= 0 && state < pr->throttling.state_count) {
                struct acpi_processor_tx_tss *tx =
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
                                                      states_tss[state]);
                *value = tx->control;
                ret = 0;
        }
        return ret;
}
static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
        int state = 0;
        int ret;
        acpi_integer value;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        pr->throttling.state = 0;

        value = 0;
        ret = acpi_read_throttling_status(pr, &value);
        if (ret >= 0) {
                state = acpi_get_throttling_state(pr, value);
                pr->throttling.state = state;
        }

        return 0;
}
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
        cpumask_t saved_mask;
        int ret;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;
        /*
         * Migrate the task to the cpu pointed to by pr.
         */
        saved_mask = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(pr->id));
        ret = pr->throttling.acpi_processor_get_throttling(pr);
        /* restore the previous state */
        set_cpus_allowed(current, saved_mask);

        return ret;
}
static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
{
        int i, step;

        if (!pr->throttling.address) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
                return -EINVAL;
        } else if (!pr->throttling.duty_width) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
                return -EINVAL;
        }
        /* TBD: Support duty_cycle values that span bit 4. */
        else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
                printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
                return -EINVAL;
        }

        pr->throttling.state_count = 1 << acpi_fadt.duty_width;
        /*
         * Compute state values. Note that throttling displays a linear power
         * performance relationship (at 50% performance the CPU will consume
         * 50% power). Values are in 1/10th of a percent to preserve accuracy.
         */
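        /*
         * Worked example (added comment, not in the original source): with
         * duty_width = 3, state_count = 1 << 3 = 8 and step = 1000 / 8 = 125,
         * so T0 = 100.0%, T1 = 87.5%, ..., T7 = 12.5% performance/power.
         */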
        step = (1000 / pr->throttling.state_count);

        for (i = 0; i < pr->throttling.state_count; i++) {
                pr->throttling.states[i].performance = 1000 - step * i;
                pr->throttling.states[i].power = 1000 - step * i;
        }
        return 0;
}
static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
                                              int state)
{
        u32 value = 0;
        u32 duty_mask = 0;
        u32 duty_value = 0;

        if (!pr)
                return -EINVAL;

        if ((state < 0) || (state > (pr->throttling.state_count - 1)))
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        if (state == pr->throttling.state)
                return 0;

        if (state < pr->throttling_platform_limit)
                return -EPERM;
        /*
         * Calculate the duty_value and duty_mask.
         */
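        /*
         * Illustrative example (added comment, not in the original source):
         * with 8 T-states, state T2 gives duty_value = 8 - 2 = 6, and
         * duty_mask selects bits duty_offset .. duty_offset + duty_width - 1
         * so they can be cleared before the new duty_value is written.
         */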
        if (state) {
                duty_value = pr->throttling.state_count - state;

                duty_value <<= pr->throttling.duty_offset;

                /* Used to clear all duty_value bits */
                duty_mask = pr->throttling.state_count - 1;

                duty_mask <<= acpi_fadt.duty_offset;
                duty_mask = ~duty_mask;
        }

        local_irq_disable();

        /*
         * Disable throttling by writing a 0 to bit 4. Note that we must
         * turn it off before we can change the duty_value.
         */
        value = inl(pr->throttling.address);
        if (value & 0x10) {
                value &= 0xFFFFFFEF;
                outl(value, pr->throttling.address);
        }

        /*
         * Write the new duty_value and then enable throttling. Note
         * that a state value of 0 leaves throttling disabled.
         */
        if (state) {
                value &= duty_mask;
                value |= duty_value;
                outl(value, pr->throttling.address);

                value |= 0x00000010;
                outl(value, pr->throttling.address);
        }

        pr->throttling.state = state;

        local_irq_enable();

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Throttling state set to T%d (%d%%)\n", state,
                          (pr->throttling.states[state].performance ? pr->
                           throttling.states[state].performance / 10 : 0)));

        return 0;
}
static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
                                             int state)
{
        int ret;
        acpi_integer value;

        if (!pr)
                return -EINVAL;

        if ((state < 0) || (state > (pr->throttling.state_count - 1)))
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        if (state == pr->throttling.state)
                return 0;

        if (state < pr->throttling_platform_limit)
                return -EPERM;

        value = 0;
        ret = acpi_get_throttling_value(pr, state, &value);
        if (ret >= 0) {
                acpi_write_throttling_state(pr, value);
                pr->throttling.state = state;
        }

        return 0;
}
int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{
        cpumask_t saved_mask;
        int ret = 0;
        unsigned int i;
        struct acpi_processor *match_pr;
        struct acpi_processor_throttling *p_throttling;
        struct throttling_tstate t_state;
        cpumask_t online_throttling_cpus;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        if ((state < 0) || (state > (pr->throttling.state_count - 1)))
                return -EINVAL;

        saved_mask = current->cpus_allowed;
        t_state.target_state = state;
        p_throttling = &(pr->throttling);
        cpus_and(online_throttling_cpus, cpu_online_map,
                 p_throttling->shared_cpu_map);
        /*
         * The throttling notifier will be called for every
         * affected cpu in order to get one proper T-state.
         * The notifier event is THROTTLING_PRECHANGE.
         */
        for_each_cpu_mask(i, online_throttling_cpus) {
                t_state.cpu = i;
                acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
                                                   &t_state);
        }
        /*
         * The function acpi_processor_set_throttling will be called
         * to switch the T-state. If the coordination type is SW_ALL or
         * HW_ALL, it is necessary to call it for every affected cpu.
         * Otherwise it can be called only for the cpu pointed to by pr.
         */
        if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
                set_cpus_allowed(current, cpumask_of_cpu(pr->id));
                ret = p_throttling->acpi_processor_set_throttling(pr,
                                                t_state.target_state);
        } else {
                /*
                 * When the T-state coordination is SW_ALL or HW_ALL,
                 * it is necessary to set the T-state for every affected
                 * cpu.
                 */
                for_each_cpu_mask(i, online_throttling_cpus) {
                        match_pr = processors[i];
                        /*
                         * If the pointer is invalid, we will report the
                         * error message and continue.
                         */
                        if (!match_pr) {
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "Invalid Pointer for CPU %d\n", i));
                                continue;
                        }
                        /*
                         * If throttling control is unsupported on CPU i,
                         * we will report the error message and continue.
                         */
                        if (!match_pr->flags.throttling) {
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "Throttling control is unsupported "
                                        "on CPU %d\n", i));
                                continue;
                        }
                        t_state.cpu = i;
                        set_cpus_allowed(current, cpumask_of_cpu(i));
                        ret = match_pr->throttling.
                                acpi_processor_set_throttling(
                                match_pr, t_state.target_state);
                }
        }
        /*
         * After set_throttling is called, the
         * throttling notifier is called for every
         * affected cpu to update the T-states.
         * The notifier event is THROTTLING_POSTCHANGE.
         */
        for_each_cpu_mask(i, online_throttling_cpus) {
                t_state.cpu = i;
                acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
                                                   &t_state);
        }
        /* restore the previous state */
        set_cpus_allowed(current, saved_mask);
        return ret;
}
int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
        int result = 0;
        struct acpi_processor_throttling *pthrottling;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
                          pr->throttling.address,
                          pr->throttling.duty_offset,
                          pr->throttling.duty_width));

        if (!pr)
                return -EINVAL;

        /*
         * Evaluate _PTC, _TSS and _TPC.
         * They must all be present or none of them can be used.
         */
        if (acpi_processor_get_throttling_control(pr) ||
            acpi_processor_get_throttling_states(pr) ||
            acpi_processor_get_platform_limit(pr)) {
                pr->throttling.acpi_processor_get_throttling =
                    &acpi_processor_get_throttling_fadt;
                pr->throttling.acpi_processor_set_throttling =
                    &acpi_processor_set_throttling_fadt;
                if (acpi_processor_get_fadt_info(pr))
                        return 0;
        } else {
                pr->throttling.acpi_processor_get_throttling =
                    &acpi_processor_get_throttling_ptc;
                pr->throttling.acpi_processor_set_throttling =
                    &acpi_processor_set_throttling_ptc;
        }

        /*
         * If the TSD package for one CPU can't be parsed successfully, it
         * means that this CPU will have no coordination with other CPUs.
         */
        if (acpi_processor_get_tsd(pr)) {
                pthrottling = &pr->throttling;
                pthrottling->tsd_valid_flag = 0;
                cpu_set(pr->id, pthrottling->shared_cpu_map);
                pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
        }

        /*
         * PIIX4 Errata: We don't support throttling on the original PIIX4.
         * This shouldn't be an issue as few (if any) mobile systems ever
         * used this part.
         */
        if (errata.piix4.throttle) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Throttling not supported on PIIX4 A- or B-step\n"));
                return 0;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
                          pr->throttling.state_count));

        pr->flags.throttling = 1;

        /*
         * Disable throttling (if enabled). We'll let subsequent policy (e.g.
         * thermal) decide to lower performance if it so chooses, but for now
         * we'll crank up the speed.
         */

        result = acpi_processor_get_throttling(pr);
        if (result)
                goto end;

        if (pr->throttling.state) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Disabling throttling (was T%d)\n",
                                  pr->throttling.state));
                result = acpi_processor_set_throttling(pr, 0);
                if (result)
                        goto end;
        }

end:
        if (result)
                pr->flags.throttling = 0;

        return result;
}
/* proc interface */

static int acpi_processor_throttling_seq_show(struct seq_file *seq,
                                              void *offset)
{
        struct acpi_processor *pr = seq->private;
        int i = 0;
        int result = 0;

        if (!pr)
                goto end;

        if (!(pr->throttling.state_count > 0)) {
                seq_puts(seq, "<not supported>\n");
                goto end;
        }

        result = acpi_processor_get_throttling(pr);

        if (result) {
                seq_puts(seq,
                         "Could not determine current throttling state.\n");
                goto end;
        }

        seq_printf(seq, "state count: %d\n"
                   "active state: T%d\n"
                   "state available: T%d to T%d\n",
                   pr->throttling.state_count, pr->throttling.state,
                   pr->throttling_platform_limit,
                   pr->throttling.state_count - 1);

        seq_puts(seq, "states:\n");
        if (pr->throttling.acpi_processor_get_throttling ==
            acpi_processor_get_throttling_fadt) {
                for (i = 0; i < pr->throttling.state_count; i++)
                        seq_printf(seq, " %cT%d: %02d%%\n",
                                   (i == pr->throttling.state ? '*' : ' '), i,
                                   (pr->throttling.states[i].performance ? pr->
                                    throttling.states[i].performance / 10 : 0));
        } else {
                for (i = 0; i < pr->throttling.state_count; i++)
                        seq_printf(seq, " %cT%d: %02d%%\n",
                                   (i == pr->throttling.state ? '*' : ' '), i,
                                   (int)pr->throttling.states_tss[i].
                                   freqpercentage);
        }

end:
        return 0;
}
static int acpi_processor_throttling_open_fs(struct inode *inode,
                                             struct file *file)
{
        return single_open(file, acpi_processor_throttling_seq_show,
                           PDE(inode)->data);
}
static ssize_t acpi_processor_write_throttling(struct file *file,
                                               const char __user *buffer,
                                               size_t count, loff_t *data)
{
        int result = 0;
        struct seq_file *m = file->private_data;
        struct acpi_processor *pr = m->private;
        char state_string[5] = "";
        char *charp = NULL;
        size_t state_val = 0;
        char tmpbuf[5] = "";

        if (!pr || (count > sizeof(state_string) - 1))
                return -EINVAL;

        if (copy_from_user(state_string, buffer, count))
                return -EFAULT;

        state_string[count] = '\0';
        if ((count > 0) && (state_string[count - 1] == '\n'))
                state_string[count - 1] = '\0';

        charp = state_string;
        if ((state_string[0] == 't') || (state_string[0] == 'T'))
                charp++;

        state_val = simple_strtoul(charp, NULL, 0);
        if (state_val >= pr->throttling.state_count)
                return -EINVAL;

        snprintf(tmpbuf, 5, "%zu", state_val);

        if (strcmp(tmpbuf, charp) != 0)
                return -EINVAL;

        result = acpi_processor_set_throttling(pr, state_val);
        if (result)
                return result;

        return count;
}
struct file_operations acpi_processor_throttling_fops = {
        .owner = THIS_MODULE,
        .open = acpi_processor_throttling_open_fs,
        .read = seq_read,
        .write = acpi_processor_write_throttling,
        .llseek = seq_lseek,
        .release = single_release,
};
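For reference (illustrative, not part of this file): with these file_operations registered under /proc/acpi/processor/CPUn/throttling, the write handler above accepts either "T2" or plain "2", so a state change is typically requested with a command such as echo T2 > /proc/acpi/processor/CPU0/throttling.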