ia64/xen-unstable

view xen/arch/x86/acpi/cpu_idle.c @ 19533:81bf0ddfcddc

Disable debug build by default.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Apr 09 17:31:35 2009 +0100 (2009-04-09)
parents f6a2bf60d49c
children dbd9fc73d77d

/*
 * cpu_idle - xen idle state module derived from Linux
 *            drivers/acpi/processor_idle.c &
 *            arch/x86/kernel/acpi/cstate.c
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *                      - Added support for C3 on SMP
 *  Copyright (C) 2007, 2008 Intel Corporation
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/lib.h>
#include <xen/types.h>
#include <xen/acpi.h>
#include <xen/smp.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
#include <xen/cpuidle.h>
#include <xen/trace.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/hpet.h>
#include <asm/processor.h>
#include <public/platform.h>
#include <public/sysctl.h>

/*#define DEBUG_PM_CX*/

static void (*lapic_timer_off)(void);
static void (*lapic_timer_on)(void);

extern u32 pmtmr_ioport;
extern void (*pm_idle) (void);
extern void (*dead_idle) (void);

static void (*pm_idle_save) (void) __read_mostly;
unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER - 1;
integer_param("max_cstate", max_cstate);
static int local_apic_timer_c2_ok __read_mostly = 0;
boolean_param("lapic_timer_c2_ok", local_apic_timer_c2_ok);

static struct acpi_processor_power *__read_mostly processor_powers[NR_CPUS];

static void print_acpi_power(uint32_t cpu, struct acpi_processor_power *power)
{
    uint32_t i, idle_usage = 0;
    uint64_t res, idle_res = 0;

    printk("==cpu%d==\n", cpu);
    printk("active state:\t\tC%d\n",
           power->last_state ? power->last_state->idx : -1);
    printk("max_cstate:\t\tC%d\n", max_cstate);
    printk("states:\n");

    for ( i = 1; i < power->count; i++ )
    {
        res = acpi_pm_tick_to_ns(power->states[i].time);
        idle_usage += power->states[i].usage;
        idle_res += res;

        printk((power->last_state && power->last_state->idx == i) ?
               "   *" : "    ");
        printk("C%d:\t", i);
        printk("type[C%d] ", power->states[i].type);
        printk("latency[%03d] ", power->states[i].latency);
        printk("usage[%08d] ", power->states[i].usage);
        printk("duration[%"PRId64"]\n", res);
    }
    printk("    C0:\tusage[%08d] duration[%"PRId64"]\n",
           idle_usage, NOW() - idle_res);
}

static void dump_cx(unsigned char key)
{
    unsigned int cpu;

    for_each_online_cpu ( cpu )
        if (processor_powers[cpu])
            print_acpi_power(cpu, processor_powers[cpu]);
}

static int __init cpu_idle_key_init(void)
{
    register_keyhandler(
        'c', dump_cx, "dump cx structures");
    return 0;
}
__initcall(cpu_idle_key_init);

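/*
 * Return the number of PM timer ticks elapsed between two reads, allowing
 * for counter wraparound: a 24-bit rollover when the FADT does not
 * advertise a 32-bit timer, a full 32-bit rollover otherwise.
 */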
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
    if ( t2 >= t1 )
        return (t2 - t1);
    else if ( !(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) )
        return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
    else
        return ((0xFFFFFFFF - t1) + t2);
}

static void acpi_safe_halt(void)
{
    smp_mb__after_clear_bit();
    safe_halt();
}

#define MWAIT_ECX_INTERRUPT_BREAK   (0x1)

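/*
 * Arm MONITOR on the current vcpu structure and execute MWAIT with the
 * given hints; a write to the monitored line or (with
 * MWAIT_ECX_INTERRUPT_BREAK) an interrupt brings the CPU back out.
 */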
static void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
    __monitor((void *)current, 0, 0);
    smp_mb();
    __mwait(eax, ecx);
}

static void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
    mwait_idle_with_hints(cx->address, MWAIT_ECX_INTERRUPT_BREAK);
}

static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
    int unused;

    switch ( cx->entry_method )
    {
    case ACPI_CSTATE_EM_FFH:
        /* Call into architectural FFH based C-state */
        acpi_processor_ffh_cstate_enter(cx);
        return;
    case ACPI_CSTATE_EM_SYSIO:
        /* IO port based C-state */
        inb(cx->address);
        /* Dummy wait op - must do something useless after P_LVL2 read
           because chipsets cannot guarantee that STPCLK# signal
           gets asserted in time to freeze execution properly. */
        unused = inl(pmtmr_ioport);
        return;
    case ACPI_CSTATE_EM_HALT:
        acpi_safe_halt();
        local_irq_disable();
        return;
    }
}

static int acpi_idle_bm_check(void)
{
    u32 bm_status = 0;

    acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
    if ( bm_status )
        acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
    /*
     * TBD: PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
     * the true state of bus mastering activity; forcing us to
     * manually check the BMIDEA bit of each IDE channel.
     */
    return bm_status;
}

static struct {
    spinlock_t lock;
    unsigned int count;
} c3_cpu_status = { .lock = SPIN_LOCK_UNLOCKED };

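/*
 * Main C-state idle entry point, installed as pm_idle: ask the cpuidle
 * governor for a target state, enter it via acpi_idle_do_entry(), and
 * account the residency measured with the ACPI PM timer.
 */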
static void acpi_processor_idle(void)
{
    struct acpi_processor_power *power = processor_powers[smp_processor_id()];
    struct acpi_processor_cx *cx = NULL;
    int next_state;
    int sleep_ticks = 0;
    u32 t1, t2 = 0;

    sched_tick_suspend();
    /*
     * sched_tick_suspend() may raise TIMER_SOFTIRQ via __stop_timer(),
     * which would break the later assumption that no softirq is pending,
     * so run do_softirq() here.
     */
    if ( softirq_pending(smp_processor_id()) )
        do_softirq();

    /*
     * Interrupts must be disabled during bus mastering calculations and
     * for C2/C3 transitions.
     */
    local_irq_disable();

    if ( softirq_pending(smp_processor_id()) )
    {
        local_irq_enable();
        sched_tick_resume();
        return;
    }

    next_state = power ? cpuidle_current_governor->select(power) : -1;
    if ( next_state > 0 )
    {
        cx = &power->states[next_state];
        if ( power->flags.bm_check && acpi_idle_bm_check()
             && cx->type == ACPI_STATE_C3 )
            cx = power->safe_state;
        if ( cx->idx > max_cstate )
            cx = &power->states[max_cstate];
    }
    if ( !cx )
    {
        if ( pm_idle_save )
            pm_idle_save();
        else
            acpi_safe_halt();
        sched_tick_resume();
        return;
    }

    power->last_state = cx;

    /*
     * Sleep:
     * ------
     * Invoke the current Cx state to put the processor to sleep.
     */
    switch ( cx->type )
    {
    case ACPI_STATE_C1:
    case ACPI_STATE_C2:
        if ( cx->type == ACPI_STATE_C1 || local_apic_timer_c2_ok )
        {
            /* Get start time (ticks) */
            t1 = inl(pmtmr_ioport);
            /* Trace cpu idle entry */
            TRACE_2D(TRC_PM_IDLE_ENTRY, cx->idx, t1);
            /* Invoke C2 */
            acpi_idle_do_entry(cx);
            /* Get end time (ticks) */
            t2 = inl(pmtmr_ioport);
            /* Trace cpu idle exit */
            TRACE_2D(TRC_PM_IDLE_EXIT, cx->idx, t2);

            /* Re-enable interrupts */
            local_irq_enable();
            /* Compute time (ticks) that we were actually asleep */
            sleep_ticks = ticks_elapsed(t1, t2);
            break;
        }
        /* Fall through: handle C2 like C3 when the LAPIC timer may stop. */

    case ACPI_STATE_C3:
        /*
         * Disable bus master:
         * bm_check implies we need ARB_DIS
         * !bm_check implies we need cache flush
         * bm_control implies whether we can do ARB_DIS
         *
         * That leaves a case where bm_check is set and bm_control is
         * not set. In that case we cannot do much, we enter C3
         * without doing anything.
         */
        if ( power->flags.bm_check && power->flags.bm_control )
        {
            spin_lock(&c3_cpu_status.lock);
            if ( ++c3_cpu_status.count == num_online_cpus() )
            {
                /*
                 * All CPUs are trying to go to C3
                 * Disable bus master arbitration
                 */
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
            }
            spin_unlock(&c3_cpu_status.lock);
        }
        else if ( !power->flags.bm_check )
        {
            /* SMP with no shared cache... Invalidate cache */
            ACPI_FLUSH_CPU_CACHE();
        }

        /*
         * Before invoking C3, be aware that the TSC and APIC timer may be
         * stopped by hardware. Without careful handling of these stops,
         * deep C-states cannot work correctly.
         */
        /* preparing APIC stop */
        lapic_timer_off();

        /* Get start time (ticks) */
        t1 = inl(pmtmr_ioport);
        /* Trace cpu idle entry */
        TRACE_2D(TRC_PM_IDLE_ENTRY, cx->idx, t1);
        /* Invoke C3 */
        acpi_idle_do_entry(cx);
        /* Get end time (ticks) */
        t2 = inl(pmtmr_ioport);

        /* recovering TSC */
        cstate_restore_tsc();
        /* Trace cpu idle exit */
        TRACE_2D(TRC_PM_IDLE_EXIT, cx->idx, t2);

        if ( power->flags.bm_check && power->flags.bm_control )
        {
            /* Enable bus master arbitration */
            spin_lock(&c3_cpu_status.lock);
            if ( c3_cpu_status.count-- == num_online_cpus() )
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
            spin_unlock(&c3_cpu_status.lock);
        }

        /* Re-enable interrupts */
        local_irq_enable();
        /* recovering APIC */
        lapic_timer_on();
        /* Compute time (ticks) that we were actually asleep */
        sleep_ticks = ticks_elapsed(t1, t2);

        break;

    default:
        local_irq_enable();
        sched_tick_resume();
        return;
    }

    cx->usage++;
    if ( sleep_ticks > 0 )
    {
        power->last_residency = acpi_pm_tick_to_ns(sleep_ticks) / 1000UL;
        cx->time += sleep_ticks;
    }

    sched_tick_resume();

    if ( cpuidle_current_governor->reflect )
        cpuidle_current_governor->reflect(power);
}

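/*
 * Idle loop for dead (offlined) CPUs: park the CPU forever in the deepest
 * known C-state, falling back to plain HLT when no per-cpu C-state
 * information is available.
 */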
static void acpi_dead_idle(void)
{
    struct acpi_processor_power *power;
    struct acpi_processor_cx *cx;
    int unused;

    if ( (power = processor_powers[smp_processor_id()]) == NULL )
        goto default_halt;

    if ( (cx = &power->states[power->count-1]) == NULL )
        goto default_halt;

    for ( ; ; )
    {
        if ( !power->flags.bm_check && cx->type == ACPI_STATE_C3 )
            ACPI_FLUSH_CPU_CACHE();

        switch ( cx->entry_method )
        {
            case ACPI_CSTATE_EM_FFH:
                /* Do not treat interrupts as break events. */
                mwait_idle_with_hints(cx->address, 0);
                break;
            case ACPI_CSTATE_EM_SYSIO:
                inb(cx->address);
                unused = inl(pmtmr_ioport);
                break;
            default:
                goto default_halt;
        }
    }

default_halt:
    for ( ; ; )
        halt();
}

static int init_cx_pminfo(struct acpi_processor_power *acpi_power)
{
    int i;

    memset(acpi_power, 0, sizeof(*acpi_power));

    for ( i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++ )
        acpi_power->states[i].idx = i;

    acpi_power->states[ACPI_STATE_C1].type = ACPI_STATE_C1;
    acpi_power->states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_EM_HALT;

    acpi_power->states[ACPI_STATE_C0].valid = 1;
    acpi_power->states[ACPI_STATE_C1].valid = 1;

    acpi_power->count = 2;

    return 0;
}

#define CPUID_MWAIT_LEAF                (5)
#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
#define CPUID5_ECX_INTERRUPT_BREAK      (0x2)

#define MWAIT_ECX_INTERRUPT_BREAK       (0x1)

#define MWAIT_SUBSTATE_MASK             (0xf)
#define MWAIT_SUBSTATE_SIZE             (4)

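/*
 * Verify via CPUID leaf 5 that the MWAIT hint encoded in this _CST entry
 * (cx->reg.address) is supported by the CPU, and that the
 * interrupt-break-event extension needed for C2/C3 is present.
 */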
static int acpi_processor_ffh_cstate_probe(xen_processor_cx_t *cx)
{
    struct cpuinfo_x86 *c = &current_cpu_data;
    unsigned int eax, ebx, ecx, edx;
    unsigned int edx_part;
    unsigned int cstate_type; /* C-state type and not ACPI C-state type */
    unsigned int num_cstate_subtype;

    if ( c->cpuid_level < CPUID_MWAIT_LEAF )
    {
        printk(XENLOG_INFO "MWAIT leaf not supported by cpuid\n");
        return -EFAULT;
    }

    cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
    printk(XENLOG_DEBUG "cpuid.MWAIT[.eax=%x, .ebx=%x, .ecx=%x, .edx=%x]\n",
           eax, ebx, ecx, edx);

    /* Check whether this particular cx_type (in CST) is supported or not */
    cstate_type = (cx->reg.address >> MWAIT_SUBSTATE_SIZE) + 1;
    edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
    num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;

    if ( num_cstate_subtype < (cx->reg.address & MWAIT_SUBSTATE_MASK) )
        return -EFAULT;

    /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
    if ( !(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
         !(ecx & CPUID5_ECX_INTERRUPT_BREAK) )
        return -EFAULT;

    printk(XENLOG_INFO "Monitor-Mwait will be used to enter C-%d state\n",
           cx->type);
    return 0;
}

/*
 * Initialize bm_flags based on the CPU cache properties
 * On SMP it depends on cache configuration
 * - When cache is not shared among all CPUs, we flush cache
 *   before entering C3.
 * - When cache is shared among all CPUs, we use the bm_check
 *   mechanism as in the UP case
 *
 * This routine is called only after all the CPUs are online
 */
static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags)
{
    struct cpuinfo_x86 *c = &current_cpu_data;

    flags->bm_check = 0;
    if ( num_online_cpus() == 1 )
        flags->bm_check = 1;
    else if ( c->x86_vendor == X86_VENDOR_INTEL )
    {
        /*
         * Today all MP CPUs that support C3 share cache.
         * And caches should not be flushed by software while
         * entering C3 type state.
         */
        flags->bm_check = 1;
    }

    /*
     * On all recent platforms, ARB_DISABLE is a nop.
     * So, set bm_control to zero to indicate that ARB_DISABLE
     * is not required while entering C3 type state on
     * P4, Core and beyond CPUs
     */
    if ( c->x86_vendor == X86_VENDOR_INTEL &&
         (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)) )
        flags->bm_control = 0;
}

#define VENDOR_INTEL                   (1)
#define NATIVE_CSTATE_BEYOND_HALT      (2)

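/*
 * Sanity-check one C-state passed in via set_cx_pminfo(): validate the
 * register description, probe MWAIT support for FFH states, and work out
 * the bus-master/cache handling and LAPIC timer replacement needed for
 * C2/C3.
 */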
static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx)
{
    static int bm_check_flag = -1;
    static int bm_control_flag = -1;

    switch ( cx->reg.space_id )
    {
    case ACPI_ADR_SPACE_SYSTEM_IO:
        if ( cx->reg.address == 0 )
            return -EINVAL;
        break;

    case ACPI_ADR_SPACE_FIXED_HARDWARE:
        if ( cx->reg.bit_width != VENDOR_INTEL ||
             cx->reg.bit_offset != NATIVE_CSTATE_BEYOND_HALT )
            return -EINVAL;

        /* assume all logical cpus have the same support for mwait */
        if ( acpi_processor_ffh_cstate_probe(cx) )
            return -EINVAL;
        break;

    default:
        return -ENODEV;
    }

    switch ( cx->type )
    {
    case ACPI_STATE_C2:
        if ( local_apic_timer_c2_ok )
            break;
        /* Fall through: C2 needs the same timer handling as C3. */
    case ACPI_STATE_C3:
        /* We must be able to use HPET in place of LAPIC timers. */
        if ( hpet_broadcast_is_available() )
        {
            lapic_timer_off = hpet_broadcast_enter;
            lapic_timer_on = hpet_broadcast_exit;
        }
        else if ( pit_broadcast_is_available() )
        {
            lapic_timer_off = pit_broadcast_enter;
            lapic_timer_on = pit_broadcast_exit;
        }
        else
        {
            return -EINVAL;
        }

        /* All the logic here assumes flags.bm_check is same across all CPUs */
        if ( bm_check_flag == -1 )
        {
            /* Determine whether bm_check is needed based on CPU */
            acpi_processor_power_init_bm_check(&(power->flags));
            bm_check_flag = power->flags.bm_check;
            bm_control_flag = power->flags.bm_control;
        }
        else
        {
            power->flags.bm_check = bm_check_flag;
            power->flags.bm_control = bm_control_flag;
        }

        if ( power->flags.bm_check )
        {
            if ( !power->flags.bm_control )
            {
                if ( power->flags.has_cst != 1 )
                {
                    /* bus mastering control is necessary */
                    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "C3 support requires BM control\n"));
                    return -EINVAL;
                }
                else
                {
                    /* Here we enter C3 without bus mastering */
                    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "C3 support without BM control\n"));
                }
            }
            /*
             * On older chipsets, BM_RLD needs to be set
             * in order for Bus Master activity to wake the
             * system from C3.  Newer chipsets handle DMA
             * during C3 automatically and BM_RLD is a NOP.
             * In either case, the proper way to
             * handle BM_RLD is to set it and leave it set.
             */
            acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
        }
        else
        {
            /*
             * WBINVD should be set in the FADT for C3 state to be
             * supported when bm_check is not required.
             */
            if ( !(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD) )
            {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "Cache invalidation should work properly"
                    " for C3 to be enabled on SMP systems\n"));
                return -EINVAL;
            }
            acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
        }

        break;
    }

    return 0;
}

static unsigned int latency_factor = 2;

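/*
 * Fill in one acpi_processor_cx slot from a xen_processor_cx_t entry,
 * after check_cx() has validated it, and remember the deepest C1/C2 state
 * as the "safe" fallback used when bus-master activity is detected.
 */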
static void set_cx(
    struct acpi_processor_power *acpi_power,
    xen_processor_cx_t *xen_cx)
{
    struct acpi_processor_cx *cx;

    if ( check_cx(acpi_power, xen_cx) != 0 )
        return;

    if ( xen_cx->type == ACPI_STATE_C1 )
        cx = &acpi_power->states[1];
    else
        cx = &acpi_power->states[acpi_power->count];

    if ( !cx->valid )
        acpi_power->count++;

    cx->valid   = 1;
    cx->type    = xen_cx->type;
    cx->address = xen_cx->reg.address;

    switch ( xen_cx->reg.space_id )
    {
    case ACPI_ADR_SPACE_FIXED_HARDWARE:
        if ( xen_cx->reg.bit_width == VENDOR_INTEL &&
             xen_cx->reg.bit_offset == NATIVE_CSTATE_BEYOND_HALT )
            cx->entry_method = ACPI_CSTATE_EM_FFH;
        else
            cx->entry_method = ACPI_CSTATE_EM_HALT;
        break;
    case ACPI_ADR_SPACE_SYSTEM_IO:
        cx->entry_method = ACPI_CSTATE_EM_SYSIO;
        break;
    default:
        cx->entry_method = ACPI_CSTATE_EM_NONE;
    }

    cx->latency = xen_cx->latency;
    cx->power   = xen_cx->power;

    cx->latency_ticks = ns_to_acpi_pm_tick(cx->latency * 1000UL);
    cx->target_residency = cx->latency * latency_factor;
    if ( cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 )
        acpi_power->safe_state = cx;
}

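/* Map an ACPI processor id to a Xen cpu number via the APIC id tables. */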
int get_cpu_id(u8 acpi_id)
{
    int i;
    u8 apic_id;

    apic_id = x86_acpiid_to_apicid[acpi_id];
    if ( apic_id == 0xff )
        return -1;

    for ( i = 0; i < NR_CPUS; i++ )
    {
        if ( apic_id == x86_cpu_to_apicid[i] )
            return i;
    }

    return -1;
}

#ifdef DEBUG_PM_CX
static void print_cx_pminfo(uint32_t cpu, struct xen_processor_power *power)
{
    XEN_GUEST_HANDLE(xen_processor_cx_t) states;
    xen_processor_cx_t state;
    XEN_GUEST_HANDLE(xen_processor_csd_t) csd;
    xen_processor_csd_t dp;
    uint32_t i;

    printk("cpu%d cx acpi info:\n", cpu);
    printk("\tcount = %d\n", power->count);
    printk("\tflags: bm_cntl[%d], bm_chk[%d], has_cst[%d],\n"
           "\t       pwr_setup_done[%d], bm_rld_set[%d]\n",
           power->flags.bm_control, power->flags.bm_check, power->flags.has_cst,
           power->flags.power_setup_done, power->flags.bm_rld_set);

    states = power->states;

    for ( i = 0; i < power->count; i++ )
    {
        if ( unlikely(copy_from_guest_offset(&state, states, i, 1)) )
            return;

        printk("\tstates[%d]:\n", i);
        printk("\t\treg.space_id = 0x%x\n", state.reg.space_id);
        printk("\t\treg.bit_width = 0x%x\n", state.reg.bit_width);
        printk("\t\treg.bit_offset = 0x%x\n", state.reg.bit_offset);
        printk("\t\treg.access_size = 0x%x\n", state.reg.access_size);
        printk("\t\treg.address = 0x%"PRIx64"\n", state.reg.address);
        printk("\t\ttype = %d\n", state.type);
        printk("\t\tlatency = %d\n", state.latency);
        printk("\t\tpower = %d\n", state.power);

        csd = state.dp;
        printk("\t\tdp(@0x%p)\n", csd.p);

        if ( csd.p != NULL )
        {
            if ( unlikely(copy_from_guest(&dp, csd, 1)) )
                return;
            printk("\t\t\tdomain = %d\n", dp.domain);
            printk("\t\t\tcoord_type = %d\n", dp.coord_type);
            printk("\t\t\tnum = %d\n", dp.num);
        }
    }
}
#else
#define print_cx_pminfo(c, p)
#endif

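/*
 * Record the C-state table reported for an ACPI processor id: initialise
 * the per-cpu acpi_processor_power, copy in each state via set_cx(), and
 * switch the pm_idle/dead_idle handlers to the ACPI ones once cpu0's data
 * has arrived.
 */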
long set_cx_pminfo(uint32_t cpu, struct xen_processor_power *power)
{
    XEN_GUEST_HANDLE(xen_processor_cx_t) states;
    xen_processor_cx_t xen_cx;
    struct acpi_processor_power *acpi_power;
    int cpu_id, i;

    if ( unlikely(!guest_handle_okay(power->states, power->count)) )
        return -EFAULT;

    print_cx_pminfo(cpu, power);

    /* map from acpi_id to cpu_id */
    cpu_id = get_cpu_id((u8)cpu);
    if ( cpu_id == -1 )
    {
        printk(XENLOG_ERR "no cpu_id for acpi_id %d\n", cpu);
        return -EFAULT;
    }

    acpi_power = processor_powers[cpu_id];
    if ( !acpi_power )
    {
        acpi_power = xmalloc(struct acpi_processor_power);
        if ( !acpi_power )
            return -ENOMEM;
        memset(acpi_power, 0, sizeof(*acpi_power));
        processor_powers[cpu_id] = acpi_power;
    }

    init_cx_pminfo(acpi_power);

    acpi_power->cpu = cpu_id;
    acpi_power->flags.bm_check = power->flags.bm_check;
    acpi_power->flags.bm_control = power->flags.bm_control;
    acpi_power->flags.has_cst = power->flags.has_cst;

    states = power->states;

    for ( i = 0; i < power->count; i++ )
    {
        if ( unlikely(copy_from_guest_offset(&xen_cx, states, i, 1)) )
            return -EFAULT;

        set_cx(acpi_power, &xen_cx);
    }

    if ( cpuidle_current_governor->enable &&
         cpuidle_current_governor->enable(acpi_power) )
        return -EFAULT;

    /* FIXME: C-state dependency is not supported so far */

    print_acpi_power(cpu_id, acpi_power);

    if ( cpu_id == 0 && pm_idle_save == NULL )
    {
        pm_idle_save = pm_idle;
        pm_idle = acpi_processor_idle;
    }

    if ( cpu_id == 0 )
    {
        dead_idle = acpi_dead_idle;
    }

    return 0;
}

uint32_t pmstat_get_cx_nr(uint32_t cpuid)
{
    return processor_powers[cpuid] ? processor_powers[cpuid]->count : 0;
}

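/*
 * Fill a pm_cx_stat buffer for the given cpu: last entered state, number
 * of states, idle time, and per-state trigger counts and residencies
 * (with C0 derived as total time minus the accumulated idle residency).
 */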
int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat)
{
    const struct acpi_processor_power *power = processor_powers[cpuid];
    uint64_t usage, res, idle_usage = 0, idle_res = 0;
    int i;

    if ( power == NULL )
    {
        stat->last = 0;
        stat->nr = 0;
        stat->idle_time = 0;
        return 0;
    }

    stat->last = power->last_state ? power->last_state->idx : 0;
    stat->nr = power->count;
    stat->idle_time = get_cpu_idle_time(cpuid);

    for ( i = power->count - 1; i >= 0; i-- )
    {
        if ( i != 0 )
        {
            usage = power->states[i].usage;
            res = acpi_pm_tick_to_ns(power->states[i].time);
            idle_usage += usage;
            idle_res += res;
        }
        else
        {
            usage = idle_usage;
            res = NOW() - idle_res;
        }

        if ( copy_to_guest_offset(stat->triggers, i, &usage, 1) ||
             copy_to_guest_offset(stat->residencies, i, &res, 1) )
            return -EFAULT;
    }

    return 0;
}

int pmstat_reset_cx_stat(uint32_t cpuid)
{
    return 0;
}