
xen/arch/x86/acpi/cpu_idle.c @ 18520:9cc4cebee50b

CPUIDLE: Support multiple C3 states

The BIOS may report multiple ACPI C3 states. These C3 states may differ
in latency and power, so modifications were made to support this case.

Signed-off-by: Wei Gang <gang.wei@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Sep 22 11:21:31 2008 +0100 (2008-09-22)
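
As an illustration of the change, here is a minimal, standalone sketch (not part of the file below, and not Xen code) of how set_cx() lays out the states reported via _CST: C1 always occupies states[1], and every further entry, including a second C3, is appended at states[count], so C3 states with different latency and power get their own slots and their own usage/residency statistics. The struct, the add_state() helper, and the latency values are hypothetical.

/*
 * Standalone sketch: mirrors the layout logic of set_cx() below with a
 * simplified structure.  Slot 0 stands in for C0 and slot 1 for C1, as in
 * init_cx_pminfo(); deeper states are appended at states[count].
 */
#include <stdio.h>

struct cx_sketch {
    int type;        /* ACPI C-state type (1, 2 or 3) */
    int latency;     /* worst-case exit latency in us (hypothetical values) */
    int valid;
};

#define MAX_STATES 8

static struct cx_sketch states[MAX_STATES] = {
    [1] = { .type = 1, .valid = 1 },   /* C1 pre-initialised, like init_cx_pminfo() */
};
static int count = 2;                  /* C0 + C1 */

static void add_state(int type, int latency)
{
    /* Mirrors set_cx(): C1 goes to slot 1, deeper states are appended. */
    struct cx_sketch *cx = (type == 1) ? &states[1] : &states[count];

    if ( !cx->valid )
        count++;
    cx->valid = 1;
    cx->type = type;
    cx->latency = latency;
}

int main(void)
{
    add_state(1, 1);     /* C1 */
    add_state(2, 17);    /* C2 */
    add_state(3, 57);    /* first C3 reported by the BIOS */
    add_state(3, 245);   /* second, deeper C3 keeps its own slot */

    for ( int i = 1; i < count; i++ )
        printf("states[%d]: C%d, latency %dus\n",
               i, states[i].type, states[i].latency);
    return 0;
}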
/*
 * cpu_idle - xen idle state module derived from Linux
 *            drivers/acpi/processor_idle.c &
 *            arch/x86/kernel/acpi/cstate.c
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                    - Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *                    - Added support for C3 on SMP
 * Copyright (C) 2007, 2008 Intel Corporation
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/lib.h>
#include <xen/types.h>
#include <xen/acpi.h>
#include <xen/smp.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
#include <xen/cpuidle.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/hpet.h>
#include <asm/processor.h>
#include <public/platform.h>
#include <public/sysctl.h>

#define DEBUG_PM_CX

#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICKS_TO_US(t) ((t * 1000) / (PM_TIMER_FREQUENCY / 1000))
#define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */
#define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */

static void (*lapic_timer_off)(void);
static void (*lapic_timer_on)(void);

extern u32 pmtmr_ioport;
extern void (*pm_idle) (void);

static void (*pm_idle_save) (void) __read_mostly;
unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER - 1;
integer_param("max_cstate", max_cstate);

static struct acpi_processor_power processor_powers[NR_CPUS];

static void print_acpi_power(uint32_t cpu, struct acpi_processor_power *power)
{
    uint32_t i;

    printk("==cpu%d==\n", cpu);
    printk("active state:\t\tC%d\n",
           power->last_state ? (int)(power->last_state - power->states) : -1);
    printk("max_cstate:\t\tC%d\n", max_cstate);
    printk("states:\n");

    for ( i = 1; i < power->count; i++ )
    {
        printk((power->last_state == &power->states[i]) ? " *" : " ");
        printk("C%d:\t\t", i);
        printk("type[C%d] ", power->states[i].type);
        printk("latency[%03d] ", power->states[i].latency);
        printk("usage[%08d] ", power->states[i].usage);
        printk("duration[%"PRId64"]\n", power->states[i].time);
    }
}

static void dump_cx(unsigned char key)
{
    for ( int i = 0; i < num_online_cpus(); i++ )
        print_acpi_power(i, &processor_powers[i]);
}

static int __init cpu_idle_key_init(void)
{
    register_keyhandler(
        'c', dump_cx, "dump cx structures");
    return 0;
}
__initcall(cpu_idle_key_init);

static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
    if ( t2 >= t1 )
        return (t2 - t1);
    else if ( !(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) )
        return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
    else
        return ((0xFFFFFFFF - t1) + t2);
}

static void acpi_safe_halt(void)
{
    smp_mb__after_clear_bit();
    safe_halt();
}

#define MWAIT_ECX_INTERRUPT_BREAK (0x1)

static void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
    __monitor((void *)current, 0, 0);
    smp_mb();
    __mwait(eax, ecx);
}

static void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
    mwait_idle_with_hints(cx->address, MWAIT_ECX_INTERRUPT_BREAK);
}

static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
    if ( cx->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE )
    {
        /* Call into architectural FFH based C-state */
        acpi_processor_ffh_cstate_enter(cx);
    }
    else
    {
        int unused;
        /* IO port based C-state */
        inb(cx->address);
        /* Dummy wait op - must do something useless after P_LVL2 read
           because chipsets cannot guarantee that STPCLK# signal
           gets asserted in time to freeze execution properly. */
        unused = inl(pmtmr_ioport);
    }
}

static inline void acpi_idle_update_bm_rld(struct acpi_processor_power *power,
                                           struct acpi_processor_cx *target)
{
    if ( !power->flags.bm_check )
        return;

    if ( power->flags.bm_rld_set && target->type != ACPI_STATE_C3 )
    {
        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
        power->flags.bm_rld_set = 0;
    }

    if ( !power->flags.bm_rld_set && target->type == ACPI_STATE_C3 )
    {
        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
        power->flags.bm_rld_set = 1;
    }
}

static int acpi_idle_bm_check(void)
{
    u32 bm_status = 0;

    acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
    if ( bm_status )
        acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
    /*
     * TBD: PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
     * the true state of bus mastering activity; forcing us to
     * manually check the BMIDEA bit of each IDE channel.
     */
    return bm_status;
}

static struct {
    spinlock_t lock;
    unsigned int count;
} c3_cpu_status = { .lock = SPIN_LOCK_UNLOCKED };

static void acpi_processor_idle(void)
{
    struct acpi_processor_power *power = NULL;
    struct acpi_processor_cx *cx = NULL;
    int next_state;
    int sleep_ticks = 0;
    u32 t1, t2 = 0;

    power = &processor_powers[smp_processor_id()];

    /*
     * Interrupts must be disabled during bus mastering calculations and
     * for C2/C3 transitions.
     */
    local_irq_disable();

    if ( softirq_pending(smp_processor_id()) )
    {
        local_irq_enable();
        return;
    }

    next_state = cpuidle_current_governor->select(power);
    if ( next_state > 0 )
    {
        cx = &power->states[next_state];
        if ( power->flags.bm_check && acpi_idle_bm_check()
             && cx->type == ACPI_STATE_C3 )
            cx = power->safe_state;
        if ( cx - &power->states[0] > max_cstate )
            cx = &power->states[max_cstate];
    }
    if ( !cx )
    {
        if ( pm_idle_save )
        {
            printk(XENLOG_DEBUG "call pm_idle_save()\n");
            pm_idle_save();
        }
        else
        {
            printk(XENLOG_DEBUG "call acpi_safe_halt()\n");
            acpi_safe_halt();
        }
        return;
    }

    power->last_state = cx;

    /*
     * Sleep:
     * ------
     * Invoke the current Cx state to put the processor to sleep.
     */
    acpi_idle_update_bm_rld(power, cx);

    switch ( cx->type )
    {
    case ACPI_STATE_C1:
        /*
         * Invoke C1.
         * Use the appropriate idle routine, the one that would
         * be used without acpi C-states.
         */
        if ( pm_idle_save )
            pm_idle_save();
        else
            acpi_safe_halt();

        /*
         * TBD: Can't get time duration while in C1, as resumes
         * go to an ISR rather than here. Need to instrument
         * base interrupt handler.
         */
        sleep_ticks = 0xFFFFFFFF;
        break;

    case ACPI_STATE_C2:
        /* Get start time (ticks) */
        t1 = inl(pmtmr_ioport);
        /* Invoke C2 */
        acpi_idle_do_entry(cx);
        /* Get end time (ticks) */
        t2 = inl(pmtmr_ioport);

        /* Re-enable interrupts */
        local_irq_enable();
        /* Compute time (ticks) that we were actually asleep */
        sleep_ticks = ticks_elapsed(t1, t2);
        break;

    case ACPI_STATE_C3:
        /*
         * disable bus master
         * bm_check implies we need ARB_DIS
         * !bm_check implies we need cache flush
         * bm_control implies whether we can do ARB_DIS
         *
         * That leaves a case where bm_check is set and bm_control is
         * not set. In that case we cannot do much, we enter C3
         * without doing anything.
         */
        if ( power->flags.bm_check && power->flags.bm_control )
        {
            spin_lock(&c3_cpu_status.lock);
            if ( ++c3_cpu_status.count == num_online_cpus() )
            {
                /*
                 * All CPUs are trying to go to C3
                 * Disable bus master arbitration
                 */
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
            }
            spin_unlock(&c3_cpu_status.lock);
        }
        else if ( !power->flags.bm_check )
        {
            /* SMP with no shared cache... Invalidate cache */
            ACPI_FLUSH_CPU_CACHE();
        }

        /*
         * Before invoking C3, be aware that the TSC and the APIC timer may be
         * stopped by hardware. Without careful handling of these TSC/APIC
         * stop issues, deep C states can't work correctly.
         */
        /* preparing TSC stop */
        cstate_save_tsc();
        /* preparing APIC stop */
        lapic_timer_off();

        /* Get start time (ticks) */
        t1 = inl(pmtmr_ioport);
        /* Invoke C3 */
        acpi_idle_do_entry(cx);
        /* Get end time (ticks) */
        t2 = inl(pmtmr_ioport);

        /* recovering TSC */
        cstate_restore_tsc();

        if ( power->flags.bm_check && power->flags.bm_control )
        {
            /* Enable bus master arbitration */
            spin_lock(&c3_cpu_status.lock);
            if ( c3_cpu_status.count-- == num_online_cpus() )
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
            spin_unlock(&c3_cpu_status.lock);
        }

        /* Re-enable interrupts */
        local_irq_enable();
        /* recovering APIC */
        lapic_timer_on();
        /* Compute time (ticks) that we were actually asleep */
        sleep_ticks = ticks_elapsed(t1, t2);

        break;

    default:
        local_irq_enable();
        return;
    }

    cx->usage++;
    if ( sleep_ticks > 0 )
    {
        power->last_residency = PM_TIMER_TICKS_TO_US(sleep_ticks);
        cx->time += sleep_ticks;
    }

    if ( cpuidle_current_governor->reflect )
        cpuidle_current_governor->reflect(power);
}

static int init_cx_pminfo(struct acpi_processor_power *acpi_power)
{
    memset(acpi_power, 0, sizeof(*acpi_power));

    acpi_power->states[ACPI_STATE_C1].type = ACPI_STATE_C1;

    acpi_power->states[ACPI_STATE_C0].valid = 1;
    acpi_power->states[ACPI_STATE_C1].valid = 1;

    acpi_power->count = 2;

    return 0;
}

#define CPUID_MWAIT_LEAF (5)
#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
#define CPUID5_ECX_INTERRUPT_BREAK (0x2)

#define MWAIT_ECX_INTERRUPT_BREAK (0x1)

#define MWAIT_SUBSTATE_MASK (0xf)
#define MWAIT_SUBSTATE_SIZE (4)

static int acpi_processor_ffh_cstate_probe(xen_processor_cx_t *cx)
{
    struct cpuinfo_x86 *c = &current_cpu_data;
    unsigned int eax, ebx, ecx, edx;
    unsigned int edx_part;
    unsigned int cstate_type; /* C-state type and not ACPI C-state type */
    unsigned int num_cstate_subtype;

    if ( c->cpuid_level < CPUID_MWAIT_LEAF )
    {
        printk(XENLOG_INFO "MWAIT leaf not supported by cpuid\n");
        return -EFAULT;
    }

    cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
    printk(XENLOG_DEBUG "cpuid.MWAIT[.eax=%x, .ebx=%x, .ecx=%x, .edx=%x]\n",
           eax, ebx, ecx, edx);

    /* Check whether this particular cx_type (in CST) is supported or not */
    cstate_type = (cx->reg.address >> MWAIT_SUBSTATE_SIZE) + 1;
    edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
    num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;

    if ( num_cstate_subtype < (cx->reg.address & MWAIT_SUBSTATE_MASK) )
        return -EFAULT;

    /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
    if ( !(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
         !(ecx & CPUID5_ECX_INTERRUPT_BREAK) )
        return -EFAULT;

    printk(XENLOG_INFO "Monitor-Mwait will be used to enter C-%d state\n", cx->type);
    return 0;
}

/*
 * Initialize bm_flags based on the CPU cache properties
 * On SMP it depends on cache configuration
 * - When cache is not shared among all CPUs, we flush cache
 *   before entering C3.
 * - When cache is shared among all CPUs, we use bm_check
 *   mechanism as in UP case
 *
 * This routine is called only after all the CPUs are online
 */
static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags)
{
    struct cpuinfo_x86 *c = &current_cpu_data;

    flags->bm_check = 0;
    if ( num_online_cpus() == 1 )
        flags->bm_check = 1;
    else if ( c->x86_vendor == X86_VENDOR_INTEL )
    {
        /*
         * Today all CPUs that support C3 share cache.
         * TBD: This needs to look at the cache shared map, once the
         * multi-core detection patch makes it into the base.
         */
        flags->bm_check = 1;
    }
}

#define VENDOR_INTEL (1)
#define NATIVE_CSTATE_BEYOND_HALT (2)

static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx)
{
    static int bm_check_flag;

    switch ( cx->reg.space_id )
    {
    case ACPI_ADR_SPACE_SYSTEM_IO:
        if ( cx->reg.address == 0 )
            return -EINVAL;
        break;

    case ACPI_ADR_SPACE_FIXED_HARDWARE:
        if ( cx->type > ACPI_STATE_C1 )
        {
            if ( cx->reg.bit_width != VENDOR_INTEL ||
                 cx->reg.bit_offset != NATIVE_CSTATE_BEYOND_HALT )
                return -EINVAL;

            /* assume all logical CPUs have the same support for mwait */
            if ( acpi_processor_ffh_cstate_probe(cx) )
                return -EINVAL;
        }
        break;

    default:
        return -ENODEV;
    }

    if ( cx->type == ACPI_STATE_C3 )
    {
        /* We must be able to use HPET in place of LAPIC timers. */
        if ( hpet_broadcast_is_available() )
        {
            lapic_timer_off = hpet_broadcast_enter;
            lapic_timer_on = hpet_broadcast_exit;
        }
        else if ( pit_broadcast_is_available() )
        {
            lapic_timer_off = pit_broadcast_enter;
            lapic_timer_on = pit_broadcast_exit;
        }
        else
        {
            return -EINVAL;
        }

        /* All the logic here assumes flags.bm_check is the same across all CPUs */
        if ( !bm_check_flag )
        {
            /* Determine whether bm_check is needed based on CPU */
            acpi_processor_power_init_bm_check(&(power->flags));
            bm_check_flag = power->flags.bm_check;
        }
        else
        {
            power->flags.bm_check = bm_check_flag;
        }

        if ( power->flags.bm_check )
        {
            if ( !power->flags.bm_control )
            {
                if ( power->flags.has_cst != 1 )
                {
                    /* bus mastering control is necessary */
                    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "C3 support requires BM control\n"));
                    return -EINVAL;
                }
                else
                {
                    /* Here we enter C3 without bus mastering */
                    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "C3 support without BM control\n"));
                }
            }
        }
        else
        {
            /*
             * WBINVD should be set in the FADT for the C3 state to be
             * supported when bm_check is not required.
             */
            if ( !(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD) )
            {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "Cache invalidation should work properly"
                    " for C3 to be enabled on SMP systems\n"));
                return -EINVAL;
            }
            acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
        }
    }

    return 0;
}

static unsigned int latency_factor = 2;

static void set_cx(
    struct acpi_processor_power *acpi_power,
    xen_processor_cx_t *xen_cx)
{
    struct acpi_processor_cx *cx;

    if ( check_cx(acpi_power, xen_cx) != 0 )
        return;

    if ( xen_cx->type == ACPI_STATE_C1 )
        cx = &acpi_power->states[1];
    else
        cx = &acpi_power->states[acpi_power->count];

    if ( !cx->valid )
        acpi_power->count++;

    cx->valid = 1;
    cx->type = xen_cx->type;
    cx->address = xen_cx->reg.address;
    cx->space_id = xen_cx->reg.space_id;
    cx->latency = xen_cx->latency;
    cx->power = xen_cx->power;

    cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
    cx->target_residency = cx->latency * latency_factor;
    if ( cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 )
        acpi_power->safe_state = cx;
}

int get_cpu_id(u8 acpi_id)
{
    int i;
    u8 apic_id;

    apic_id = x86_acpiid_to_apicid[acpi_id];
    if ( apic_id == 0xff )
        return -1;

    for ( i = 0; i < NR_CPUS; i++ )
    {
        if ( apic_id == x86_cpu_to_apicid[i] )
            return i;
    }

    return -1;
}

#ifdef DEBUG_PM_CX
static void print_cx_pminfo(uint32_t cpu, struct xen_processor_power *power)
{
    XEN_GUEST_HANDLE(xen_processor_cx_t) states;
    xen_processor_cx_t state;
    XEN_GUEST_HANDLE(xen_processor_csd_t) csd;
    xen_processor_csd_t dp;
    uint32_t i;

    printk("cpu%d cx acpi info:\n", cpu);
    printk("\tcount = %d\n", power->count);
    printk("\tflags: bm_cntl[%d], bm_chk[%d], has_cst[%d],\n"
           "\t pwr_setup_done[%d], bm_rld_set[%d]\n",
           power->flags.bm_control, power->flags.bm_check, power->flags.has_cst,
           power->flags.power_setup_done, power->flags.bm_rld_set);

    states = power->states;

    for ( i = 0; i < power->count; i++ )
    {
        if ( unlikely(copy_from_guest_offset(&state, states, i, 1)) )
            return;

        printk("\tstates[%d]:\n", i);
        printk("\t\treg.space_id = 0x%x\n", state.reg.space_id);
        printk("\t\treg.bit_width = 0x%x\n", state.reg.bit_width);
        printk("\t\treg.bit_offset = 0x%x\n", state.reg.bit_offset);
        printk("\t\treg.access_size = 0x%x\n", state.reg.access_size);
        printk("\t\treg.address = 0x%"PRIx64"\n", state.reg.address);
        printk("\t\ttype = %d\n", state.type);
        printk("\t\tlatency = %d\n", state.latency);
        printk("\t\tpower = %d\n", state.power);

        csd = state.dp;
        printk("\t\tdp(@0x%p)\n", csd.p);

        if ( csd.p != NULL )
        {
            if ( unlikely(copy_from_guest(&dp, csd, 1)) )
                return;
            printk("\t\t\tdomain = %d\n", dp.domain);
            printk("\t\t\tcoord_type = %d\n", dp.coord_type);
            printk("\t\t\tnum = %d\n", dp.num);
        }
    }
}
#else
#define print_cx_pminfo(c, p)
#endif

long set_cx_pminfo(uint32_t cpu, struct xen_processor_power *power)
{
    XEN_GUEST_HANDLE(xen_processor_cx_t) states;
    xen_processor_cx_t xen_cx;
    struct acpi_processor_power *acpi_power;
    int cpu_id, i;

    if ( unlikely(!guest_handle_okay(power->states, power->count)) )
        return -EFAULT;

    print_cx_pminfo(cpu, power);

    /* map from acpi_id to cpu_id */
    cpu_id = get_cpu_id((u8)cpu);
    if ( cpu_id == -1 )
    {
        printk(XENLOG_ERR "no cpu_id for acpi_id %d\n", cpu);
        return -EFAULT;
    }

    acpi_power = &processor_powers[cpu_id];

    init_cx_pminfo(acpi_power);

    acpi_power->cpu = cpu_id;
    acpi_power->flags.bm_check = power->flags.bm_check;
    acpi_power->flags.bm_control = power->flags.bm_control;
    acpi_power->flags.has_cst = power->flags.has_cst;

    states = power->states;

    for ( i = 0; i < power->count; i++ )
    {
        if ( unlikely(copy_from_guest_offset(&xen_cx, states, i, 1)) )
            return -EFAULT;

        set_cx(acpi_power, &xen_cx);
    }

    if ( cpuidle_current_governor->enable &&
         cpuidle_current_governor->enable(acpi_power) )
        return -EFAULT;

    /* FIXME: C-state dependency is not supported so far */

    print_acpi_power(cpu_id, acpi_power);

    if ( cpu_id == 0 && pm_idle_save == NULL )
    {
        pm_idle_save = pm_idle;
        pm_idle = acpi_processor_idle;
    }

    return 0;
}

uint32_t pmstat_get_cx_nr(uint32_t cpuid)
{
    return processor_powers[cpuid].count;
}

int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat)
{
    struct acpi_processor_power *power = &processor_powers[cpuid];
    struct vcpu *v = idle_vcpu[cpuid];
    uint64_t usage;
    int i;

    stat->last = (power->last_state) ?
        (int)(power->last_state - &power->states[0]) : 0;
    stat->nr = processor_powers[cpuid].count;
    stat->idle_time = v->runstate.time[RUNSTATE_running];
    if ( v->is_running )
        stat->idle_time += NOW() - v->runstate.state_entry_time;

    for ( i = 0; i < power->count; i++ )
    {
        usage = power->states[i].usage;
        if ( copy_to_guest_offset(stat->triggers, i, &usage, 1) )
            return -EFAULT;
    }
    for ( i = 0; i < power->count; i++ )
        if ( copy_to_guest_offset(stat->residencies, i,
                                  &power->states[i].time, 1) )
            return -EFAULT;

    return 0;
}

int pmstat_reset_cx_stat(uint32_t cpuid)
{
    return 0;
}
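
At idle time it is cpuidle_current_governor->select(), called from acpi_processor_idle() above, that actually picks among these states; the governor itself lives outside this file. The standalone sketch below is a simplified, hypothetical select() built around the same target_residency idea (latency * latency_factor, as computed in set_cx()): with two C3 entries of different depth, a short expected idle period can still use the shallow C3 while a long one picks the deeper, higher-latency C3. The struct, the selection rule, and the numbers are illustrative assumptions, not the Xen governor.

/*
 * Sketch only: a toy governor-style selection over a states[] table that
 * contains two C3 entries of different depth.
 */
#include <stdio.h>

struct cx_entry {
    int type;                      /* ACPI C-state type */
    unsigned int latency;          /* exit latency, us (hypothetical) */
    unsigned int target_residency; /* latency * latency_factor, as in set_cx() */
};

static int select_state(const struct cx_entry *states, int count,
                        unsigned int expected_idle_us, unsigned int max_cstate)
{
    int i, best = 1; /* C1 is always usable */

    /* Pick the deepest slot whose target residency fits the expected idle
     * period, never going past max_cstate (cf. the clamp in
     * acpi_processor_idle()). */
    for ( i = 2; i < count && i <= (int)max_cstate; i++ )
        if ( states[i].target_residency <= expected_idle_us )
            best = i;
    return best;
}

int main(void)
{
    /* hypothetical table: C0 placeholder, C1, C2 and two C3 states */
    struct cx_entry states[] = {
        { 0, 0, 0 },
        { 1, 1, 2 },
        { 2, 17, 34 },
        { 3, 57, 114 },
        { 3, 245, 490 },
    };

    printf("100us expected idle  -> states[%d]\n", select_state(states, 5, 100, 7));
    printf("1000us expected idle -> states[%d]\n", select_state(states, 5, 1000, 7));
    return 0;
}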