ia64/linux-2.6.18-xen.hg

view drivers/acpi/processor_perflib.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, ballooning may in fact have failed due to memory pressure in
the host, in which case it is desirable to keep attempting to reach
the target in case memory becomes available. The most likely scenario
is that some guests are ballooning down while others are ballooning
up, creating temporary memory pressure while things stabilise. A
well-behaved toolstack would not ask a domain to balloon to more than
its allocation, nor would it deliberately over-commit memory by
setting balloon targets which exceed total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.
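
In outline, the reworked balloon worker behaves as below. This is a
hedged sketch only, with hypothetical helper names
(increase_reservation and decrease_reservation are assumed to return
the number of pages actually transferred); it is not the patch text
itself:

static unsigned long balloon_current_pages, balloon_target_pages;
static struct timer_list balloon_timer;

static void balloon_process(void)
{
        long credit = balloon_target_pages - balloon_current_pages;

        if (credit > 0)
                /* May succeed only partially under host memory
                 * pressure; keep whatever pages Xen did grant. */
                balloon_current_pages += increase_reservation(credit);
        else if (credit < 0)
                balloon_current_pages -= decrease_reservation(-credit);

        /* No hard limit any more: if we are still short of the
         * target, rearm the timer and try again later. */
        if (balloon_current_pages != balloon_target_pages)
                mod_timer(&balloon_timer, jiffies + HZ);
}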

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents b8916f4d48f6
children
/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                    - Added processor hotplug support
 *
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>

#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#endif

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
#define _COMPONENT                      ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("acpi_processor")

static DEFINE_MUTEX(performance_mutex);

/*
 * _PPC support is implemented as a CPUfreq policy notifier: each time
 * a CPUfreq driver that is also registered with the ACPI core is asked
 * to change the speed policy, the maximum value is adjusted so that it
 * stays within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

#define PPC_REGISTERED  1
#define PPC_IN_USE      2

static int acpi_processor_ppc_status = 0;

#ifdef CONFIG_CPU_FREQ
static int acpi_processor_ppc_notifier(struct notifier_block *nb,
                                       unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;
        struct acpi_processor *pr;
        unsigned int ppc = 0;

        mutex_lock(&performance_mutex);

        if (event != CPUFREQ_INCOMPATIBLE)
                goto out;

        pr = processors[policy->cpu];
        if (!pr || !pr->performance)
                goto out;

        ppc = (unsigned int)pr->performance_platform_limit;
        if (!ppc)
                goto out;

        if (ppc >= pr->performance->state_count)
                goto out;

        cpufreq_verify_within_limits(policy, 0,
                                     pr->performance->states[ppc].
                                     core_frequency * 1000);

out:
        mutex_unlock(&performance_mutex);

        return 0;
}

static struct notifier_block acpi_ppc_notifier_block = {
        .notifier_call = acpi_processor_ppc_notifier,
};
#endif /* CONFIG_CPU_FREQ */

static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
        acpi_status status = 0;
        unsigned long ppc = 0;

        if (!pr)
                return -EINVAL;

        /*
         * _PPC indicates the maximum state currently supported by the
         * platform (e.g. 0 = states 0..n; 1 = states 1..n; etc.).  With
         * four states, for example, a _PPC of 1 restricts the processor
         * to P1..P3.
         */
        status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);

        if (status != AE_NOT_FOUND)
                acpi_processor_ppc_status |= PPC_IN_USE;

        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
                return -ENODEV;
        }

        pr->performance_platform_limit = (int)ppc;

        return 0;
}

int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
{
        int ret = acpi_processor_get_platform_limit(pr);
        if (ret < 0)
                return ret;
        else
#ifdef CONFIG_CPU_FREQ
                return cpufreq_update_policy(pr->id);
#elif defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL)
                return processor_notify_external(pr,
                                PROCESSOR_PM_CHANGE, PM_TYPE_PERF);
#endif
}

#ifdef CONFIG_CPU_FREQ
void acpi_processor_ppc_init(void)
{
        if (!cpufreq_register_notifier
            (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
                acpi_processor_ppc_status |= PPC_REGISTERED;
        else
                printk(KERN_DEBUG
                       "Warning: Processor Platform Limit not supported.\n");
}

void acpi_processor_ppc_exit(void)
{
        if (acpi_processor_ppc_status & PPC_REGISTERED)
                cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
                                            CPUFREQ_POLICY_NOTIFIER);

        acpi_processor_ppc_status &= ~PPC_REGISTERED;
}
#endif /* CONFIG_CPU_FREQ */

static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = 0;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *pct = NULL;
        union acpi_object obj = { 0 };

        status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
                return -ENODEV;
        }

        pct = (union acpi_object *)buffer.pointer;
        if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
            || (pct->package.count != 2)) {
                printk(KERN_ERR PREFIX "Invalid _PCT data\n");
                result = -EFAULT;
                goto end;
        }

        /*
         * control_register
         */

        obj = pct->package.elements[0];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_pct_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
                result = -EFAULT;
                goto end;
        }
        memcpy(&pr->performance->control_register, obj.buffer.pointer,
               sizeof(struct acpi_pct_register));

        /*
         * status_register
         */

        obj = pct->package.elements[1];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_pct_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
                result = -EFAULT;
                goto end;
        }

        memcpy(&pr->performance->status_register, obj.buffer.pointer,
               sizeof(struct acpi_pct_register));

end:
        kfree(buffer.pointer);

        return result;
}

static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *pss = NULL;
        int i;

        status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
                return -ENODEV;
        }

        pss = (union acpi_object *)buffer.pointer;
        if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
                printk(KERN_ERR PREFIX "Invalid _PSS data\n");
                result = -EFAULT;
                goto end;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
                          pss->package.count));

        pr->performance->state_count = pss->package.count;
        pr->performance->states =
            kmalloc(sizeof(struct acpi_processor_px) * pss->package.count,
                    GFP_KERNEL);
        if (!pr->performance->states) {
                result = -ENOMEM;
                goto end;
        }

        for (i = 0; i < pr->performance->state_count; i++) {

                struct acpi_processor_px *px = &(pr->performance->states[i]);

                state.length = sizeof(struct acpi_processor_px);
                state.pointer = px;

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

                status = acpi_extract_package(&(pss->package.elements[i]),
                                              &format, &state);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
                        result = -EFAULT;
                        kfree(pr->performance->states);
                        goto end;
                }

                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
                                  i,
                                  (u32) px->core_frequency,
                                  (u32) px->power,
                                  (u32) px->transition_latency,
                                  (u32) px->bus_master_latency,
                                  (u32) px->control, (u32) px->status));

                if (!px->core_frequency) {
                        printk(KERN_ERR PREFIX
                               "Invalid _PSS data: freq is zero\n");
                        result = -EFAULT;
                        kfree(pr->performance->states);
                        goto end;
                }
        }

end:
        kfree(buffer.pointer);

        return result;
}

#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL
static
#endif
int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        acpi_handle handle = NULL;

        if (!pr || !pr->performance || !pr->handle)
                return -EINVAL;

        status = acpi_get_handle(pr->handle, "_PCT", &handle);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "ACPI-based processor performance control unavailable\n"));
                return -ENODEV;
        }

        result = acpi_processor_get_performance_control(pr);
        if (result)
                return result;

        result = acpi_processor_get_performance_states(pr);
        if (result)
                return result;

        result = acpi_processor_get_platform_limit(pr);
        if (result)
                return result;

        return 0;
}

#ifdef CONFIG_CPU_FREQ
int acpi_processor_notify_smm(struct module *calling_module)
{
        acpi_status status;
        static int is_done = 0;

        if (!(acpi_processor_ppc_status & PPC_REGISTERED))
                return -EBUSY;

        if (!try_module_get(calling_module))
                return -EINVAL;

        /* is_done is set negative if an error occurred, and positive
         * if _no_ error occurred but SMM has already been notified.
         * This avoids a double notification, which might lead to
         * unexpected results.
         */
        if (is_done > 0) {
                module_put(calling_module);
                return 0;
        } else if (is_done < 0) {
                module_put(calling_module);
                return is_done;
        }

        is_done = -EIO;

        /* Can't write pstate_cnt to smi_cmd if either value is zero */
        if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_cnt\n"));
                module_put(calling_module);
                return 0;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n",
                          acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd));

        /* FADT v1 doesn't support pstate_cnt, but many BIOS vendors use
         * it anyway, so we need to support it. */
        if (acpi_fadt_is_v1) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Using v1.0 FADT reserved value for pstate_cnt\n"));
        }

        status = acpi_os_write_port(acpi_fadt.smi_cmd,
                                    (u32) acpi_fadt.pstate_cnt, 8);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status,
                                "Failed to write pstate_cnt [0x%x] to "
                                "smi_cmd [0x%x]", acpi_fadt.pstate_cnt,
                                acpi_fadt.smi_cmd));
                module_put(calling_module);
                return status;
        }

        /* Success.  If there's no _PPC we have nothing to fear, so the
         * cpufreq driver can safely be rmmod'ed. */
        is_done = 1;

        if (!(acpi_processor_ppc_status & PPC_IN_USE))
                module_put(calling_module);

        return 0;
}

EXPORT_SYMBOL(acpi_processor_notify_smm);
#endif /* CONFIG_CPU_FREQ */

#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
/* /proc/acpi/processor/../performance interface (DEPRECATED) */

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
static struct file_operations acpi_processor_perf_fops = {
        .open = acpi_processor_perf_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = (struct acpi_processor *)seq->private;
        int i;

        if (!pr)
                goto end;

        if (!pr->performance) {
                seq_puts(seq, "<not supported>\n");
                goto end;
        }

        seq_printf(seq, "state count: %d\n"
                   "active state: P%d\n",
                   pr->performance->state_count, pr->performance->state);

        seq_puts(seq, "states:\n");
        for (i = 0; i < pr->performance->state_count; i++)
                seq_printf(seq,
                           " %cP%d: %d MHz, %d mW, %d uS\n",
                           (i == pr->performance->state ? '*' : ' '), i,
                           (u32) pr->performance->states[i].core_frequency,
                           (u32) pr->performance->states[i].power,
                           (u32) pr->performance->states[i].transition_latency);

end:
        return 0;
}

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_perf_seq_show,
                           PDE(inode)->data);
}

static ssize_t
acpi_processor_write_performance(struct file *file,
                                 const char __user * buffer,
                                 size_t count, loff_t * data)
{
        int result = 0;
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct acpi_processor *pr = (struct acpi_processor *)m->private;
        struct acpi_processor_performance *perf;
        char state_string[12] = { '\0' };
        unsigned int new_state = 0;
        struct cpufreq_policy policy;

        if (!pr || (count > sizeof(state_string) - 1))
                return -EINVAL;

        perf = pr->performance;
        if (!perf)
                return -EINVAL;

        if (copy_from_user(state_string, buffer, count))
                return -EFAULT;

        state_string[count] = '\0';
        new_state = simple_strtoul(state_string, NULL, 0);

        if (new_state >= perf->state_count)
                return -EINVAL;

        cpufreq_get_policy(&policy, pr->id);

        policy.cpu = pr->id;
        policy.min = perf->states[new_state].core_frequency * 1000;
        policy.max = perf->states[new_state].core_frequency * 1000;

        result = cpufreq_set_policy(&policy);
        if (result)
                return result;

        return count;
}

static void acpi_cpufreq_add_file(struct acpi_processor *pr)
{
        struct proc_dir_entry *entry = NULL;
        struct acpi_device *device = NULL;

        if (acpi_bus_get_device(pr->handle, &device))
                return;

        /* add file 'performance' [R/W] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
                                  S_IFREG | S_IRUGO | S_IWUSR,
                                  acpi_device_dir(device));
        if (entry) {
                acpi_processor_perf_fops.write = acpi_processor_write_performance;
                entry->proc_fops = &acpi_processor_perf_fops;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;
        }
        return;
}

static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
{
        struct acpi_device *device = NULL;

        if (acpi_bus_get_device(pr->handle, &device))
                return;

        /* remove file 'performance' */
        remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
                          acpi_device_dir(device));

        return;
}

#else
static void acpi_cpufreq_add_file(struct acpi_processor *pr)
{
        return;
}
static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
{
        return;
}
#endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */

#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL
static
#endif
int acpi_processor_get_psd(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
        struct acpi_buffer state = {0, NULL};
        union acpi_object *psd = NULL;
        struct acpi_psd_package *pdomain;

        status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                return -ENODEV;
        }

        psd = (union acpi_object *) buffer.pointer;
        if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (psd->package.count != 1) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
                result = -EFAULT;
                goto end;
        }

        pdomain = &(pr->performance->domain_info);

        state.length = sizeof(struct acpi_psd_package);
        state.pointer = pdomain;

        status = acpi_extract_package(&(psd->package.elements[0]),
                                      &format, &state);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:num_entries\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:revision\n"));
                result = -EFAULT;
                goto end;
        }

end:
        kfree(buffer.pointer);
        return result;
}

int acpi_processor_preregister_performance(
                struct acpi_processor_performance **performance)
{
        int count, count_target;
        int retval = 0;
        unsigned int i, j;
        cpumask_t covered_cpus;
        struct acpi_processor *pr;
        struct acpi_psd_package *pdomain;
        struct acpi_processor *match_pr;
        struct acpi_psd_package *match_pdomain;

        mutex_lock(&performance_mutex);

        retval = 0;

        /* Call _PSD for all CPUs */
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr) {
                        /* Look only at processors in ACPI namespace */
                        continue;
                }

                if (pr->performance) {
                        retval = -EBUSY;
                        continue;
                }

                if (!performance || !performance[i]) {
                        retval = -EINVAL;
                        continue;
                }

                pr->performance = performance[i];
                cpu_set(i, pr->performance->shared_cpu_map);
                if (acpi_processor_get_psd(pr)) {
                        retval = -EINVAL;
                        continue;
                }
        }
        if (retval)
                goto err_ret;

        /*
         * Now that we have _PSD data from all CPUs, let's set up the
         * P-state domain info.
         */
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr)
                        continue;

                /* Basic validity check for domain info */
                pdomain = &(pr->performance->domain_info);
                if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
                    (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES)) {
                        retval = -EINVAL;
                        goto err_ret;
                }
                if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
                    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
                    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
                        retval = -EINVAL;
                        goto err_ret;
                }
        }

        cpus_clear(covered_cpus);
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr)
                        continue;

                if (cpu_isset(i, covered_cpus))
                        continue;

                pdomain = &(pr->performance->domain_info);
                cpu_set(i, pr->performance->shared_cpu_map);
                cpu_set(i, covered_cpus);
                if (pdomain->num_processors <= 1)
                        continue;

                /* Validate the Domain info */
                count_target = pdomain->num_processors;
                count = 1;
                if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = processors[j];
                        if (!match_pr)
                                continue;

                        match_pdomain = &(match_pr->performance->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        /* Here i and j are in the same domain */

                        if (match_pdomain->num_processors != count_target) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        if (pdomain->coord_type != match_pdomain->coord_type) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        cpu_set(j, covered_cpus);
                        cpu_set(j, pr->performance->shared_cpu_map);
                        count++;
                }

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = processors[j];
                        if (!match_pr)
                                continue;

                        match_pdomain = &(match_pr->performance->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        match_pr->performance->shared_type =
                                        pr->performance->shared_type;
                        match_pr->performance->shared_cpu_map =
                                        pr->performance->shared_cpu_map;
                }
        }

err_ret:
        if (retval) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error while parsing _PSD domain information. Assuming no coordination\n"));
        }

        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr || !pr->performance)
                        continue;

                /* Assume no coordination on any error parsing domain info */
                if (retval) {
                        cpus_clear(pr->performance->shared_cpu_map);
                        cpu_set(i, pr->performance->shared_cpu_map);
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                }
                pr->performance = NULL; /* Will be set for real in register */
        }

        mutex_unlock(&performance_mutex);
        return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);

int
acpi_processor_register_performance(struct acpi_processor_performance
                                    *performance, unsigned int cpu)
{
        struct acpi_processor *pr;

        if (!(acpi_processor_ppc_status & PPC_REGISTERED))
                return -EINVAL;

        mutex_lock(&performance_mutex);

        pr = processors[cpu];
        if (!pr) {
                mutex_unlock(&performance_mutex);
                return -ENODEV;
        }

        if (pr->performance) {
                mutex_unlock(&performance_mutex);
                return -EBUSY;
        }

        WARN_ON(!performance);

        pr->performance = performance;

        if (acpi_processor_get_performance_info(pr)) {
                pr->performance = NULL;
                mutex_unlock(&performance_mutex);
                return -EIO;
        }

        acpi_cpufreq_add_file(pr);

        mutex_unlock(&performance_mutex);
        return 0;
}

EXPORT_SYMBOL(acpi_processor_register_performance);

void
acpi_processor_unregister_performance(struct acpi_processor_performance
                                      *performance, unsigned int cpu)
{
        struct acpi_processor *pr;

        mutex_lock(&performance_mutex);

        pr = processors[cpu];
        if (!pr) {
                mutex_unlock(&performance_mutex);
                return;
        }

        if (pr->performance)
                kfree(pr->performance->states);
        pr->performance = NULL;

        acpi_cpufreq_remove_file(pr);

        mutex_unlock(&performance_mutex);

        return;
}

EXPORT_SYMBOL(acpi_processor_unregister_performance);
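
For context, a cpufreq driver of this era consumes the API exported
above roughly as follows. This is an illustrative sketch modelled
loosely on acpi-cpufreq; everything except the
acpi_processor_*_performance calls is hypothetical, and error handling
is trimmed:

#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <acpi/processor.h>

/* One performance block per possible CPU, as preregister expects. */
static struct acpi_processor_performance *perf_data[NR_CPUS];

static int __init example_init(void)
{
        unsigned int i;

        for_each_possible_cpu(i) {
                perf_data[i] = kzalloc(sizeof(*perf_data[i]), GFP_KERNEL);
                if (!perf_data[i])
                        return -ENOMEM;
        }

        /* Gather _PSD coordination data for all CPUs up front; on
         * error the library assumes no coordination between CPUs. */
        acpi_processor_preregister_performance(perf_data);
        return 0;
}

static int example_cpu_init(struct cpufreq_policy *policy)
{
        /* Evaluates _PCT/_PSS/_PPC and binds the table to this CPU. */
        return acpi_processor_register_performance(perf_data[policy->cpu],
                                                   policy->cpu);
}

static int example_cpu_exit(struct cpufreq_policy *policy)
{
        acpi_processor_unregister_performance(perf_data[policy->cpu],
                                              policy->cpu);
        return 0;
}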