ia64/xen-unstable
changeset 15583:ad11f74d298c
Add HVM hardware feature suspend/resume.
Signed-off-by: Ke Yu <ke.yu@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author    kfraser@localhost.localdomain
date      Wed Jul 11 17:23:09 2007 +0100 (2007-07-11)
parents   24379dde8ac4
children  e00547dcda09
files     xen/arch/x86/acpi/power.c
          xen/arch/x86/hvm/hvm.c
          xen/arch/x86/hvm/svm/svm.c
          xen/arch/x86/hvm/vmx/vmcs.c
          xen/arch/x86/hvm/vmx/vmx.c
          xen/include/asm-x86/hvm/hvm.h
          xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/arch/x86/acpi/power.c	Wed Jul 11 15:47:14 2007 +0100
+++ b/xen/arch/x86/acpi/power.c	Wed Jul 11 17:23:09 2007 +0100
@@ -82,10 +82,27 @@ static void device_power_up(void)
     console_resume();
 }
 
+static void freeze_domains(void)
+{
+    struct domain *d;
+
+    for_each_domain(d)
+        if (d->domain_id != 0)
+            domain_pause(d);
+}
+
+static void thaw_domains(void)
+{
+    struct domain *d;
+
+    for_each_domain(d)
+        if (d->domain_id != 0)
+            domain_unpause(d);
+}
+
 /* Main interface to do xen specific suspend/resume */
 int enter_state(u32 state)
 {
-    struct domain *d;
     unsigned long flags;
     int error;
 
@@ -99,9 +116,9 @@ int enter_state(u32 state)
     if (!spin_trylock(&pm_lock))
         return -EBUSY;
 
-    for_each_domain(d)
-        if (d->domain_id != 0)
-            domain_pause(d);
+    freeze_domains();
+
+    hvm_suspend_cpu();
 
     pmprintk(XENLOG_INFO, "PM: Preparing system for %s sleep\n",
              acpi_states[state]);
@@ -135,13 +152,11 @@ int enter_state(u32 state)
  Done:
     local_irq_restore(flags);
 
-    for_each_domain(d)
-        if (d->domain_id!=0)
-            domain_unpause(d);
+    hvm_resume_cpu();
 
+    thaw_domains();
     spin_unlock(&pm_lock);
     return error;
-
 }
 
 /*
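The ordering above is the point of the power.c change: every domain other than dom0 is paused before per-CPU HVM state is torn down, and hvm_resume_cpu() runs before thaw_domains() so no HVM vcpu can be scheduled while VMX/SVM support is off. A minimal user-space model of that ordering, with stub functions standing in for the Xen internals (the function names mirror the patch; the boolean flags and main() are illustrative only):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative flags; the real code tracks this in hardware (CR4.VMXE / EFER.SVME). */
static bool guests_running = true;
static bool hvm_enabled_on_cpu = true;

static void freeze_domains(void)  { guests_running = false; }
static void thaw_domains(void)    { guests_running = true; }
static void hvm_suspend_cpu(void) { hvm_enabled_on_cpu = false; }
static void hvm_resume_cpu(void)  { hvm_enabled_on_cpu = true; }

/* Mirrors the shape of enter_state(): guests must be quiescent before
 * HVM support is torn down, and HVM support must be back before any
 * guest is allowed to run again. */
static void enter_state_model(void)
{
    freeze_domains();
    hvm_suspend_cpu();
    assert(!guests_running && !hvm_enabled_on_cpu);

    /* ... platform enters and leaves the ACPI sleep state here ... */

    hvm_resume_cpu();
    thaw_domains();
    assert(guests_running && hvm_enabled_on_cpu);
}

int main(void)
{
    enter_state_model();
    puts("suspend/resume ordering ok");
    return 0;
}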
--- a/xen/arch/x86/hvm/hvm.c	Wed Jul 11 15:47:14 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c	Wed Jul 11 17:23:09 2007 +0100
@@ -78,8 +78,7 @@ void hvm_enable(struct hvm_function_tabl
 
 void hvm_disable(void)
 {
-    if ( hvm_enabled )
-        hvm_funcs.disable();
+    hvm_suspend_cpu();
 }
 
 void hvm_stts(struct vcpu *v)
--- a/xen/arch/x86/hvm/svm/svm.c	Wed Jul 11 15:47:14 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Wed Jul 11 17:23:09 2007 +0100
@@ -94,9 +94,8 @@ static void svm_inject_exception(struct 
     vmcb->eventinj = event;
 }
 
-static void stop_svm(void)
+static void svm_suspend_cpu(void)
 {
-    /* We turn off the EFER_SVME bit. */
     write_efer(read_efer() & ~EFER_SVME);
 }
 
@@ -974,7 +973,7 @@ static int svm_event_injection_faulted(s
 
 static struct hvm_function_table svm_function_table = {
     .name = "SVM",
-    .disable = stop_svm,
+    .suspend_cpu = svm_suspend_cpu,
     .domain_initialise = svm_domain_initialise,
     .domain_destroy = svm_domain_destroy,
     .vcpu_initialise = svm_vcpu_initialise,
--- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed Jul 11 15:47:14 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c	Wed Jul 11 17:23:09 2007 +0100
@@ -45,7 +45,9 @@ u32 vmx_vmexit_control __read_mostly;
 u32 vmx_vmentry_control __read_mostly;
 bool_t cpu_has_vmx_ins_outs_instr_info __read_mostly;
 
+static DEFINE_PER_CPU(struct vmcs_struct *, host_vmcs);
 static DEFINE_PER_CPU(struct vmcs_struct *, current_vmcs);
+static DEFINE_PER_CPU(struct list_head, active_vmcs_list);
 
 static u32 vmcs_revision_id __read_mostly;
 
@@ -185,34 +187,81 @@ static void vmx_free_vmcs(struct vmcs_st
 static void __vmx_clear_vmcs(void *info)
 {
     struct vcpu *v = info;
+    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
 
-    __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
+    /* Otherwise we can nest (vmx_suspend_cpu() vs. vmx_clear_vmcs()). */
+    ASSERT(!local_irq_is_enabled());
+
+    if ( arch_vmx->active_cpu == smp_processor_id() )
+    {
+        __vmpclear(virt_to_maddr(arch_vmx->vmcs));
 
-    v->arch.hvm_vmx.active_cpu = -1;
-    v->arch.hvm_vmx.launched = 0;
+        arch_vmx->active_cpu = -1;
+        arch_vmx->launched = 0;
 
-    if ( v->arch.hvm_vmx.vmcs == this_cpu(current_vmcs) )
-        this_cpu(current_vmcs) = NULL;
+        list_del(&arch_vmx->active_list);
+
+        if ( arch_vmx->vmcs == this_cpu(current_vmcs) )
+            this_cpu(current_vmcs) = NULL;
+    }
 }
 
 static void vmx_clear_vmcs(struct vcpu *v)
 {
     int cpu = v->arch.hvm_vmx.active_cpu;
 
-    if ( cpu == -1 )
-        return;
-
-    if ( cpu == smp_processor_id() )
-        return __vmx_clear_vmcs(v);
-
-    on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1);
+    if ( cpu != -1 )
+        on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1);
 }
 
 static void vmx_load_vmcs(struct vcpu *v)
 {
+    unsigned long flags;
+
+    local_irq_save(flags);
+
+    if ( v->arch.hvm_vmx.active_cpu == -1 )
+    {
+        list_add(&v->arch.hvm_vmx.active_list, &this_cpu(active_vmcs_list));
+        v->arch.hvm_vmx.active_cpu = smp_processor_id();
+    }
+
+    ASSERT(v->arch.hvm_vmx.active_cpu == smp_processor_id());
+
     __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
-    v->arch.hvm_vmx.active_cpu = smp_processor_id();
     this_cpu(current_vmcs) = v->arch.hvm_vmx.vmcs;
+
+    local_irq_restore(flags);
+}
+
+void vmx_suspend_cpu(void)
+{
+    struct list_head *active_vmcs_list = &this_cpu(active_vmcs_list);
+    unsigned long flags;
+
+    local_irq_save(flags);
+
+    while ( !list_empty(active_vmcs_list) )
+        __vmx_clear_vmcs(list_entry(active_vmcs_list->next,
+                                    struct vcpu, arch.hvm_vmx.active_list));
+
+    if ( read_cr4() & X86_CR4_VMXE )
+    {
+        __vmxoff();
+        clear_in_cr4(X86_CR4_VMXE);
+    }
+
+    local_irq_restore(flags);
+}
+
+void vmx_resume_cpu(void)
+{
+    if ( !(read_cr4() & X86_CR4_VMXE) )
+    {
+        set_in_cr4(X86_CR4_VMXE);
+        if ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) )
+            BUG();
+    }
 }
 
 void vmx_vmcs_enter(struct vcpu *v)
@@ -247,12 +296,17 @@ void vmx_vmcs_exit(struct vcpu *v)
 
 struct vmcs_struct *vmx_alloc_host_vmcs(void)
 {
-    return vmx_alloc_vmcs();
+    ASSERT(this_cpu(host_vmcs) == NULL);
+    this_cpu(host_vmcs) = vmx_alloc_vmcs();
+    INIT_LIST_HEAD(&this_cpu(active_vmcs_list));
+    return this_cpu(host_vmcs);
 }
 
 void vmx_free_host_vmcs(struct vmcs_struct *vmcs)
 {
+    ASSERT(vmcs == this_cpu(host_vmcs));
     vmx_free_vmcs(vmcs);
+    this_cpu(host_vmcs) = NULL;
 }
 
 struct xgt_desc {
@@ -451,12 +505,17 @@ static void construct_vmcs(struct vcpu *
 
 int vmx_create_vmcs(struct vcpu *v)
 {
-    if ( v->arch.hvm_vmx.vmcs == NULL )
+    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
+
+    if ( arch_vmx->vmcs == NULL )
     {
-        if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
+        if ( (arch_vmx->vmcs = vmx_alloc_vmcs()) == NULL )
             return -ENOMEM;
 
-        __vmx_clear_vmcs(v);
+        INIT_LIST_HEAD(&arch_vmx->active_list);
+        __vmpclear(virt_to_maddr(arch_vmx->vmcs));
+        arch_vmx->active_cpu = -1;
+        arch_vmx->launched = 0;
     }
 
     construct_vmcs(v);
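The per-CPU active_vmcs_list introduced here is what makes vmx_suspend_cpu() possible: every VMCS loaded on a CPU via vmx_load_vmcs() is threaded onto that CPU's list, so the suspend path can VMCLEAR each one before executing VMXOFF. A self-contained sketch of that flush loop, using a plain singly-linked list in place of Xen's list_head machinery (all names below are hypothetical models, not the patch's code):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Model of one vcpu's VMCS state: on which CPU it is active, if any. */
struct vmcs_state {
    int active_cpu;               /* -1 when not loaded anywhere */
    struct vmcs_state *next;      /* link in the owning CPU's active list */
};

/* Model of one CPU's active-VMCS list (a per-CPU variable in the patch). */
static struct vmcs_state *active_list;

static void model_load(struct vmcs_state *v, int cpu)
{
    if ( v->active_cpu == -1 )    /* first load on this CPU: enlist it */
    {
        v->next = active_list;
        active_list = v;
        v->active_cpu = cpu;
    }
}

/* Mirrors vmx_suspend_cpu(): clear everything active here, then VMXOFF. */
static void model_suspend(void)
{
    while ( active_list != NULL )
    {
        struct vmcs_state *v = active_list;
        active_list = v->next;    /* list_del() + __vmpclear() in the patch */
        v->active_cpu = -1;
    }
    /* Only now is it safe to execute VMXOFF and clear CR4.VMXE. */
}

int main(void)
{
    struct vmcs_state a = { -1, NULL }, b = { -1, NULL };
    model_load(&a, 0);
    model_load(&b, 0);
    model_suspend();
    assert(active_list == NULL && a.active_cpu == -1 && b.active_cpu == -1);
    puts("all active VMCSs cleared before VMXOFF");
    return 0;
}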
--- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Jul 11 15:47:14 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Jul 11 17:23:09 2007 +0100
@@ -907,15 +907,6 @@ static void vmx_ctxt_switch_to(struct vc
     vmx_restore_dr(v);
 }
 
-static void stop_vmx(void)
-{
-    if ( !(read_cr4() & X86_CR4_VMXE) )
-        return;
-
-    __vmxoff();
-    clear_in_cr4(X86_CR4_VMXE);
-}
-
 static void vmx_store_cpu_guest_regs(
     struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
 {
@@ -1244,7 +1235,6 @@ static void disable_intercept_for_msr(u3
 
 static struct hvm_function_table vmx_function_table = {
     .name = "VMX",
-    .disable = stop_vmx,
     .domain_initialise = vmx_domain_initialise,
     .domain_destroy = vmx_domain_destroy,
     .vcpu_initialise = vmx_vcpu_initialise,
@@ -1271,7 +1261,9 @@ static struct hvm_function_table vmx_fun
     .inject_exception = vmx_inject_exception,
     .init_ap_context = vmx_init_ap_context,
     .init_hypercall_page = vmx_init_hypercall_page,
-    .event_injection_faulted = vmx_event_injection_faulted
+    .event_injection_faulted = vmx_event_injection_faulted,
+    .suspend_cpu = vmx_suspend_cpu,
+    .resume_cpu = vmx_resume_cpu,
 };
 
 int start_vmx(void)
--- a/xen/include/asm-x86/hvm/hvm.h	Wed Jul 11 15:47:14 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h	Wed Jul 11 17:23:09 2007 +0100
@@ -72,11 +72,6 @@ struct hvm_function_table {
     char *name;
 
     /*
-     * Disable HVM functionality
-     */
-    void (*disable)(void);
-
-    /*
      * Initialise/destroy HVM domain/vcpu resources
      */
     int (*domain_initialise)(struct domain *d);
@@ -160,6 +155,9 @@ struct hvm_function_table {
     void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
 
     int (*event_injection_faulted)(struct vcpu *v);
+
+    void (*suspend_cpu)(void);
+    void (*resume_cpu)(void);
 };
 
 extern struct hvm_function_table hvm_funcs;
@@ -316,4 +314,16 @@ static inline int hvm_event_injection_fa
 /* These exceptions must always be intercepted. */
 #define HVM_TRAP_MASK (1U << TRAP_machine_check)
 
+static inline void hvm_suspend_cpu(void)
+{
+    if ( hvm_funcs.suspend_cpu )
+        hvm_funcs.suspend_cpu();
+}
+
+static inline void hvm_resume_cpu(void)
+{
+    if ( hvm_funcs.resume_cpu )
+        hvm_funcs.resume_cpu();
+}
+
 #endif /* __ASM_X86_HVM_HVM_H__ */
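Note that the new hooks are optional: hvm_suspend_cpu() and hvm_resume_cpu() test the function pointer before calling through it, which is why the SVM table above can register only .suspend_cpu and leave .resume_cpu unset. A self-contained sketch of that NULL-checked dispatch pattern (the struct and helpers model the patch; hvm_funcs_model and main() are illustrative):

#include <stdio.h>

/* Trimmed-down model of struct hvm_function_table: just the new hooks. */
struct hvm_function_table_model {
    const char *name;
    void (*suspend_cpu)(void);
    void (*resume_cpu)(void);
};

static void svm_suspend_cpu_model(void) { puts("SVM: clear EFER.SVME"); }

/* SVM-style table: resume_cpu intentionally left NULL. */
static struct hvm_function_table_model hvm_funcs_model = {
    .name        = "SVM",
    .suspend_cpu = svm_suspend_cpu_model,
    /* .resume_cpu not set */
};

/* NULL-checked dispatch, the same shape as the new inline helpers. */
static void hvm_suspend_cpu_model(void)
{
    if ( hvm_funcs_model.suspend_cpu )
        hvm_funcs_model.suspend_cpu();
}

static void hvm_resume_cpu_model(void)
{
    if ( hvm_funcs_model.resume_cpu )   /* NULL for SVM: quietly a no-op */
        hvm_funcs_model.resume_cpu();
}

int main(void)
{
    hvm_suspend_cpu_model();   /* prints */
    hvm_resume_cpu_model();    /* no-op, no crash */
    return 0;
}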
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Jul 11 15:47:14 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Jul 11 17:23:09 2007 +0100
@@ -28,6 +28,8 @@ extern int start_vmx(void);
 extern void vmcs_dump_vcpu(void);
 extern void vmx_init_vmcs_config(void);
 extern void setup_vmcs_dump(void);
+extern void vmx_suspend_cpu(void);
+extern void vmx_resume_cpu(void);
 
 struct vmcs_struct {
     u32 vmcs_revision_id;
@@ -59,6 +61,7 @@ struct arch_vmx_struct {
      * - Activated on a CPU by VMPTRLD. Deactivated by VMCLEAR.
      * - Launched on active CPU by VMLAUNCH when current VMCS.
      */
+    struct list_head active_list;
    int active_cpu;
    int launched;
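The comment block this hunk extends describes the VMCS lifecycle: a VMCS is activated on a CPU by VMPTRLD, deactivated by VMCLEAR, and launched by VMLAUNCH while active on that CPU; active_list simply records every active VMCS per CPU. A small model of those transitions as a state machine (hypothetical names; the legality checks are a simplification of the Intel VMX rules):

#include <assert.h>
#include <stdio.h>

/* The three VMCS states implied by the comment in vmcs.h. */
enum vmcs_lifecycle { VMCS_CLEARED, VMCS_ACTIVE, VMCS_LAUNCHED };

struct vmcs_model {
    enum vmcs_lifecycle state;
    int active_cpu;     /* -1 unless active or launched somewhere */
};

/* VMPTRLD: activate on a CPU; modeled as legal only from the cleared state. */
static void model_vmptrld(struct vmcs_model *v, int cpu)
{
    assert(v->state == VMCS_CLEARED);
    v->state = VMCS_ACTIVE;
    v->active_cpu = cpu;
}

/* VMLAUNCH: modeled as legal only while active on that same CPU. */
static void model_vmlaunch(struct vmcs_model *v, int cpu)
{
    assert(v->state == VMCS_ACTIVE && v->active_cpu == cpu);
    v->state = VMCS_LAUNCHED;
}

/* VMCLEAR: deactivate from any state, returning to cleared. */
static void model_vmclear(struct vmcs_model *v)
{
    v->state = VMCS_CLEARED;
    v->active_cpu = -1;
}

int main(void)
{
    struct vmcs_model v = { VMCS_CLEARED, -1 };
    model_vmptrld(&v, 0);
    model_vmlaunch(&v, 0);
    model_vmclear(&v);      /* what vmx_suspend_cpu() does for each list entry */
    assert(v.state == VMCS_CLEARED && v.active_cpu == -1);
    puts("vmcs lifecycle ok");
    return 0;
}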