ia64/linux-2.6.18-xen.hg

view drivers/acpi/processor_perflib.c @ 531:3c564f80f2ef

Notify ACPI processor events to external logic, including C/P/T and hotplug, etc.

Signed-off-by: Tian Kevin <kevin.tian@intel.com>
Signed-off-by: Wei Gang <gang.wei@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu May 01 10:52:31 2008 +0100 (2008-05-01)
parents 831230e53067
children e39cf97647af
line source
1 /*
2 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
3 *
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
7 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8 * - Added processor hotplug support
9 *
10 *
11 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or (at
16 * your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful, but
19 * WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License along
24 * with this program; if not, write to the Free Software Foundation, Inc.,
25 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
26 *
27 */
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/init.h>
32 #include <linux/cpufreq.h>
34 #ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
35 #include <linux/proc_fs.h>
36 #include <linux/seq_file.h>
37 #include <linux/mutex.h>
39 #include <asm/uaccess.h>
40 #endif
42 #include <acpi/acpi_bus.h>
43 #include <acpi/processor.h>
45 #define ACPI_PROCESSOR_COMPONENT 0x01000000
46 #define ACPI_PROCESSOR_CLASS "processor"
47 #define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver"
48 #define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
49 #define _COMPONENT ACPI_PROCESSOR_COMPONENT
50 ACPI_MODULE_NAME("acpi_processor")
static DEFINE_MUTEX(performance_mutex);

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * This means each time a CPUfreq driver registered also with
 * the ACPI core is asked to change the speed policy, the maximum
 * value is adjusted so that it is within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

/* Flag bits for acpi_processor_ppc_status below. */
#define PPC_REGISTERED 1	/* policy notifier registered with cpufreq */
#define PPC_IN_USE 2		/* at least one processor has a _PPC object */

static int acpi_processor_ppc_status = 0;
69 static int acpi_processor_ppc_notifier(struct notifier_block *nb,
70 unsigned long event, void *data)
71 {
72 struct cpufreq_policy *policy = data;
73 struct acpi_processor *pr;
74 unsigned int ppc = 0;
76 mutex_lock(&performance_mutex);
78 if (event != CPUFREQ_INCOMPATIBLE)
79 goto out;
81 pr = processors[policy->cpu];
82 if (!pr || !pr->performance)
83 goto out;
85 ppc = (unsigned int)pr->performance_platform_limit;
86 if (!ppc)
87 goto out;
89 if (ppc > pr->performance->state_count)
90 goto out;
92 cpufreq_verify_within_limits(policy, 0,
93 pr->performance->states[ppc].
94 core_frequency * 1000);
96 out:
97 mutex_unlock(&performance_mutex);
99 return 0;
100 }
/* Hooked into the cpufreq policy notifier chain by acpi_processor_ppc_init(). */
static struct notifier_block acpi_ppc_notifier_block = {
	.notifier_call = acpi_processor_ppc_notifier,
};
106 static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
107 {
108 acpi_status status = 0;
109 unsigned long ppc = 0;
112 if (!pr)
113 return -EINVAL;
115 /*
116 * _PPC indicates the maximum state currently supported by the platform
117 * (e.g. 0 = states 0..n; 1 = states 1..n; etc.
118 */
119 status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
121 if (status != AE_NOT_FOUND)
122 acpi_processor_ppc_status |= PPC_IN_USE;
124 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
125 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
126 return -ENODEV;
127 }
129 pr->performance_platform_limit = (int)ppc;
131 return 0;
132 }
134 int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
135 {
136 int ret = acpi_processor_get_platform_limit(pr);
137 if (ret < 0)
138 return (ret);
139 else if (!processor_pmperf_external())
140 return cpufreq_update_policy(pr->id);
141 else
142 return processor_notify_external(pr,
143 PROCESSOR_PM_CHANGE, PM_TYPE_PERF);
144 }
146 void acpi_processor_ppc_init(void)
147 {
148 if (!cpufreq_register_notifier
149 (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
150 acpi_processor_ppc_status |= PPC_REGISTERED;
151 else
152 printk(KERN_DEBUG
153 "Warning: Processor Platform Limit not supported.\n");
154 }
156 void acpi_processor_ppc_exit(void)
157 {
158 if (acpi_processor_ppc_status & PPC_REGISTERED)
159 cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
160 CPUFREQ_POLICY_NOTIFIER);
162 acpi_processor_ppc_status &= ~PPC_REGISTERED;
163 }
/*
 * Evaluate _PCT and copy the control/status register definitions into
 * pr->performance.  _PCT must be a package of exactly two buffer
 * elements, each at least sizeof(struct acpi_pct_register) bytes.
 *
 * Returns 0 on success, -ENODEV if _PCT fails to evaluate, -EFAULT on
 * malformed data.  Caller guarantees pr and pr->performance are valid
 * (see acpi_processor_get_performance_info()).
 */
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pct = NULL;
	union acpi_object obj = { 0 };

	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
		return -ENODEV;
	}

	pct = (union acpi_object *)buffer.pointer;
	if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
	    || (pct->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register (first package element)
	 */

	obj = pct->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->performance->control_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

	/*
	 * status_register (second package element)
	 */

	obj = pct->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->performance->status_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

      end:
	/* buffer.pointer was allocated by ACPICA (ACPI_ALLOCATE_BUFFER) */
	kfree(buffer.pointer);

	return result;
}
227 static int acpi_processor_get_performance_states(struct acpi_processor *pr)
228 {
229 int result = 0;
230 acpi_status status = AE_OK;
231 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
232 struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
233 struct acpi_buffer state = { 0, NULL };
234 union acpi_object *pss = NULL;
235 int i;
238 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
239 if (ACPI_FAILURE(status)) {
240 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
241 return -ENODEV;
242 }
244 pss = (union acpi_object *)buffer.pointer;
245 if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
246 printk(KERN_ERR PREFIX "Invalid _PSS data\n");
247 result = -EFAULT;
248 goto end;
249 }
251 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
252 pss->package.count));
254 pr->performance->state_count = pss->package.count;
255 pr->performance->states =
256 kmalloc(sizeof(struct acpi_processor_px) * pss->package.count,
257 GFP_KERNEL);
258 if (!pr->performance->states) {
259 result = -ENOMEM;
260 goto end;
261 }
263 for (i = 0; i < pr->performance->state_count; i++) {
265 struct acpi_processor_px *px = &(pr->performance->states[i]);
267 state.length = sizeof(struct acpi_processor_px);
268 state.pointer = px;
270 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
272 status = acpi_extract_package(&(pss->package.elements[i]),
273 &format, &state);
274 if (ACPI_FAILURE(status)) {
275 ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
276 result = -EFAULT;
277 kfree(pr->performance->states);
278 goto end;
279 }
281 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
282 "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
283 i,
284 (u32) px->core_frequency,
285 (u32) px->power,
286 (u32) px->transition_latency,
287 (u32) px->bus_master_latency,
288 (u32) px->control, (u32) px->status));
290 if (!px->core_frequency) {
291 printk(KERN_ERR PREFIX
292 "Invalid _PSS data: freq is zero\n");
293 result = -EFAULT;
294 kfree(pr->performance->states);
295 goto end;
296 }
297 }
299 end:
300 kfree(buffer.pointer);
302 return result;
303 }
305 static int acpi_processor_get_performance_info(struct acpi_processor *pr)
306 {
307 int result = 0;
308 acpi_status status = AE_OK;
309 acpi_handle handle = NULL;
312 if (!pr || !pr->performance || !pr->handle)
313 return -EINVAL;
315 status = acpi_get_handle(pr->handle, "_PCT", &handle);
316 if (ACPI_FAILURE(status)) {
317 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
318 "ACPI-based processor performance control unavailable\n"));
319 return -ENODEV;
320 }
322 result = acpi_processor_get_performance_control(pr);
323 if (result)
324 return result;
326 result = acpi_processor_get_performance_states(pr);
327 if (result)
328 return result;
330 result = acpi_processor_get_platform_limit(pr);
331 if (result)
332 return result;
334 return 0;
335 }
/*
 * acpi_processor_notify_smm - hand P-state control from SMM firmware to
 * the OS by writing FADT pstate_cnt to the FADT SMI command port.
 *
 * Called by cpufreq drivers.  Takes a reference on @calling_module and
 * keeps it for the module's lifetime when _PPC is in use (PPC_IN_USE),
 * so that platform-limit events always have a driver to act on them;
 * otherwise the reference is dropped before returning.
 *
 * Returns 0 on success or when no SMI notification is needed, -EBUSY
 * if the _PPC notifier is not registered, -EINVAL if the module
 * reference cannot be taken, or the failing acpi_status from the port
 * write.  The result is latched in is_done so SMM is notified at most
 * once.
 */
int acpi_processor_notify_smm(struct module *calling_module)
{
	acpi_status status;
	static int is_done = 0;	/* <0: failed, 0: not yet done, >0: done */

	if (!(acpi_processor_ppc_status & PPC_REGISTERED))
		return -EBUSY;

	if (!try_module_get(calling_module))
		return -EINVAL;

	/* is_done is set to negative if an error occurred,
	 * and to positive if _no_ error occurred, but SMM
	 * was already notified. This avoids double notification
	 * which might lead to unexpected results...
	 */
	if (is_done > 0) {
		module_put(calling_module);
		return 0;
	} else if (is_done < 0) {
		module_put(calling_module);
		return is_done;
	}

	/* Assume failure until the port write succeeds below. */
	is_done = -EIO;

	/* Can't write pstate_cnt to smi_cmd if either value is zero */
	if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_cnt\n"));
		module_put(calling_module);
		return 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n",
			  acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd));

	/* FADT v1 doesn't support pstate_cnt, many BIOS vendors use
	 * it anyway, so we need to support it... */
	if (acpi_fadt_is_v1) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Using v1.0 FADT reserved value for pstate_cnt\n"));
	}

	status = acpi_os_write_port(acpi_fadt.smi_cmd,
				    (u32) acpi_fadt.pstate_cnt, 8);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Failed to write pstate_cnt [0x%x] to "
				"smi_cmd [0x%x]", acpi_fadt.pstate_cnt,
				acpi_fadt.smi_cmd));
		module_put(calling_module);
		/* NOTE(review): returns a raw acpi_status, not a -errno,
		 * unlike the other error paths — callers beware. */
		return status;
	}

	/* Success. If there's no _PPC, we need to fear nothing, so
	 * we can allow the cpufreq driver to be rmmod'ed. */
	is_done = 1;

	if (!(acpi_processor_ppc_status & PPC_IN_USE))
		module_put(calling_module);

	return 0;
}

EXPORT_SYMBOL(acpi_processor_notify_smm);
#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
/* /proc/acpi/processor/../performance interface (DEPRECATED) */

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
/*
 * seq_file fops for the 'performance' proc node.  .write is filled in
 * at node-creation time (see acpi_cpufreq_add_file()).
 */
static struct file_operations acpi_processor_perf_fops = {
	.open = acpi_processor_perf_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
416 static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset)
417 {
418 struct acpi_processor *pr = (struct acpi_processor *)seq->private;
419 int i;
422 if (!pr)
423 goto end;
425 if (!pr->performance) {
426 seq_puts(seq, "<not supported>\n");
427 goto end;
428 }
430 seq_printf(seq, "state count: %d\n"
431 "active state: P%d\n",
432 pr->performance->state_count, pr->performance->state);
434 seq_puts(seq, "states:\n");
435 for (i = 0; i < pr->performance->state_count; i++)
436 seq_printf(seq,
437 " %cP%d: %d MHz, %d mW, %d uS\n",
438 (i == pr->performance->state ? '*' : ' '), i,
439 (u32) pr->performance->states[i].core_frequency,
440 (u32) pr->performance->states[i].power,
441 (u32) pr->performance->states[i].transition_latency);
443 end:
444 return 0;
445 }
447 static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
448 {
449 return single_open(file, acpi_processor_perf_seq_show,
450 PDE(inode)->data);
451 }
453 static ssize_t
454 acpi_processor_write_performance(struct file *file,
455 const char __user * buffer,
456 size_t count, loff_t * data)
457 {
458 int result = 0;
459 struct seq_file *m = (struct seq_file *)file->private_data;
460 struct acpi_processor *pr = (struct acpi_processor *)m->private;
461 struct acpi_processor_performance *perf;
462 char state_string[12] = { '\0' };
463 unsigned int new_state = 0;
464 struct cpufreq_policy policy;
467 if (!pr || (count > sizeof(state_string) - 1))
468 return -EINVAL;
470 perf = pr->performance;
471 if (!perf)
472 return -EINVAL;
474 if (copy_from_user(state_string, buffer, count))
475 return -EFAULT;
477 state_string[count] = '\0';
478 new_state = simple_strtoul(state_string, NULL, 0);
480 if (new_state >= perf->state_count)
481 return -EINVAL;
483 cpufreq_get_policy(&policy, pr->id);
485 policy.cpu = pr->id;
486 policy.min = perf->states[new_state].core_frequency * 1000;
487 policy.max = perf->states[new_state].core_frequency * 1000;
489 result = cpufreq_set_policy(&policy);
490 if (result)
491 return result;
493 return count;
494 }
496 static void acpi_cpufreq_add_file(struct acpi_processor *pr)
497 {
498 struct proc_dir_entry *entry = NULL;
499 struct acpi_device *device = NULL;
502 if (acpi_bus_get_device(pr->handle, &device))
503 return;
505 /* add file 'performance' [R/W] */
506 entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
507 S_IFREG | S_IRUGO | S_IWUSR,
508 acpi_device_dir(device));
509 if (entry){
510 acpi_processor_perf_fops.write = acpi_processor_write_performance;
511 entry->proc_fops = &acpi_processor_perf_fops;
512 entry->data = acpi_driver_data(device);
513 entry->owner = THIS_MODULE;
514 }
515 return;
516 }
518 static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
519 {
520 struct acpi_device *device = NULL;
523 if (acpi_bus_get_device(pr->handle, &device))
524 return;
526 /* remove file 'performance' */
527 remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
528 acpi_device_dir(device));
530 return;
531 }
#else
/* Proc interface disabled at build time: provide no-op stubs. */
static void acpi_cpufreq_add_file(struct acpi_processor *pr)
{
	return;
}
static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
{
	return;
}
#endif				/* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
/*
 * Evaluate _PSD (P-State Dependency) and extract its single
 * coordination package into pr->performance->domain_info, then
 * validate entry count and revision.
 *
 * Returns 0 on success, -ENODEV if _PSD is absent or fails to
 * evaluate (it is optional), -EFAULT on malformed data.
 */
static int acpi_processor_get_psd(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		return -ENODEV;
	}

	psd = (union acpi_object *) buffer.pointer;
	if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
		result = -EFAULT;
		goto end;
	}

	/* _PSD must wrap exactly one coordination sub-package */
	if (psd->package.count != 1) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
		result = -EFAULT;
		goto end;
	}

	pdomain = &(pr->performance->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	/* "NNNNN": five integers, matching the acpi_psd_package layout */
	status = acpi_extract_package(&(psd->package.elements[0]),
		&format, &state);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:num_entries\n"));
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:revision\n"));
		result = -EFAULT;
		goto end;
	}

end:
	kfree(buffer.pointer);
	return result;
}
/*
 * acpi_processor_preregister_performance - evaluate _PSD for every
 * CPU and compute P-state coordination domains before a cpufreq
 * driver registers for real.
 *
 * For each CPU this fills performance[i]->shared_cpu_map and
 * shared_type from the _PSD domain information.  On any parsing or
 * validation error the code falls back to "no coordination": each CPU
 * gets a singleton map and CPUFREQ_SHARED_TYPE_ALL.  pr->performance
 * is always reset to NULL before returning — it is set for real in
 * acpi_processor_register_performance().
 *
 * Returns 0 on success, or the last -EBUSY/-EINVAL recorded while
 * walking the CPUs (callers still get the fallback maps).
 */
int acpi_processor_preregister_performance(
		struct acpi_processor_performance **performance)
{
	int count, count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	mutex_lock(&performance_mutex);

	retval = 0;

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = processors[i];
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			/* already claimed by another driver */
			retval = -EBUSY;
			continue;
		}

		if (!performance || !performance[i]) {
			retval = -EINVAL;
			continue;
		}

		/* Temporarily borrow the caller's per-CPU structure so
		 * acpi_processor_get_psd() has somewhere to extract into. */
		pr->performance = performance[i];
		cpu_set(i, pr->performance->shared_cpu_map);
		if (acpi_processor_get_psd(pr)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, lets setup P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		pr = processors[i];
		if (!pr)
			continue;

		/* Basic validity check for domain info */
		pdomain = &(pr->performance->domain_info);
		if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
		    (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES)) {
			retval = -EINVAL;
			goto err_ret;
		}
		if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
		    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
		    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
			retval = -EINVAL;
			goto err_ret;
		}
	}

	/* Group CPUs by _PSD domain id; covered_cpus marks CPUs whose
	 * domain has already been fully processed. */
	cpus_clear(covered_cpus);
	for_each_possible_cpu(i) {
		pr = processors[i];
		if (!pr)
			continue;

		if (cpu_isset(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpu_set(i, pr->performance->shared_cpu_map);
		cpu_set(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		count = 1;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		/* First pass: collect all CPUs sharing i's domain and
		 * cross-check processor count and coordination type. */
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = processors[j];
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */

			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpu_set(j, covered_cpus);
			cpu_set(j, pr->performance->shared_cpu_map);
			count++;
		}

		/* Second pass: propagate i's finished shared_type and
		 * shared_cpu_map to every other member of the domain. */
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = processors[j];
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type =
					pr->performance->shared_type;
			match_pr->performance->shared_cpu_map =
				pr->performance->shared_cpu_map;
		}
	}

err_ret:
	if (retval) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error while parsing _PSD domain information. Assuming no coordination\n"));
	}

	for_each_possible_cpu(i) {
		pr = processors[i];
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpus_clear(pr->performance->shared_cpu_map);
			cpu_set(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
		pr->performance = NULL; /* Will be set for real in register */
	}

	mutex_unlock(&performance_mutex);
	return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);
768 int
769 acpi_processor_register_performance(struct acpi_processor_performance
770 *performance, unsigned int cpu)
771 {
772 struct acpi_processor *pr;
775 if (!(acpi_processor_ppc_status & PPC_REGISTERED))
776 return -EINVAL;
778 mutex_lock(&performance_mutex);
780 pr = processors[cpu];
781 if (!pr) {
782 mutex_unlock(&performance_mutex);
783 return -ENODEV;
784 }
786 if (pr->performance) {
787 mutex_unlock(&performance_mutex);
788 return -EBUSY;
789 }
791 WARN_ON(!performance);
793 pr->performance = performance;
795 if (acpi_processor_get_performance_info(pr)) {
796 pr->performance = NULL;
797 mutex_unlock(&performance_mutex);
798 return -EIO;
799 }
801 acpi_cpufreq_add_file(pr);
803 mutex_unlock(&performance_mutex);
804 return 0;
805 }
807 EXPORT_SYMBOL(acpi_processor_register_performance);
809 void
810 acpi_processor_unregister_performance(struct acpi_processor_performance
811 *performance, unsigned int cpu)
812 {
813 struct acpi_processor *pr;
816 mutex_lock(&performance_mutex);
818 pr = processors[cpu];
819 if (!pr) {
820 mutex_unlock(&performance_mutex);
821 return;
822 }
824 if (pr->performance)
825 kfree(pr->performance->states);
826 pr->performance = NULL;
828 acpi_cpufreq_remove_file(pr);
830 mutex_unlock(&performance_mutex);
832 return;
833 }
835 EXPORT_SYMBOL(acpi_processor_unregister_performance);