ia64/xen-unstable
changeset 10648:82f481bda1c7
[HVM][VMX] Move vmcs and I/O bitmap allocation into
vmx_initialise_guest_resources().
Signed-off-by: Xin B Li <xin.b.li@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
| field | value |
|---|---|
| author | kfraser@localhost.localdomain |
| date | Wed Jul 05 11:21:19 2006 +0100 (2006-07-05) |
| parents | 4d2354be4aa6 |
| children | 8e1ae72e905e |
| files | xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/vmx/vmcs.h |
line diff
1.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c Wed Jul 05 10:32:33 2006 +0100 1.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c Wed Jul 05 11:21:19 2006 +0100 1.3 @@ -41,34 +41,52 @@ 1.4 #include <asm/shadow_64.h> 1.5 #endif 1.6 1.7 -int vmcs_size; 1.8 +static int vmcs_size; 1.9 +static int vmcs_order; 1.10 +static u32 vmcs_revision_id; 1.11 1.12 -struct vmcs_struct *vmx_alloc_vmcs(void) 1.13 +void vmx_init_vmcs_config(void) 1.14 { 1.15 - struct vmcs_struct *vmcs; 1.16 u32 vmx_msr_low, vmx_msr_high; 1.17 1.18 + if ( vmcs_size ) 1.19 + return; 1.20 + 1.21 rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high); 1.22 - vmcs_size = vmx_msr_high & 0x1fff; 1.23 - vmcs = alloc_xenheap_pages(get_order_from_bytes(vmcs_size)); 1.24 - memset((char *)vmcs, 0, vmcs_size); /* don't remove this */ 1.25 + 1.26 + vmcs_revision_id = vmx_msr_low; 1.27 + 1.28 + vmcs_size = vmx_msr_high & 0x1fff; 1.29 + vmcs_order = get_order_from_bytes(vmcs_size); 1.30 +} 1.31 1.32 - vmcs->vmcs_revision_id = vmx_msr_low; 1.33 +static struct vmcs_struct *vmx_alloc_vmcs(void) 1.34 +{ 1.35 + struct vmcs_struct *vmcs; 1.36 + 1.37 + if ( (vmcs = alloc_xenheap_pages(vmcs_order)) == NULL ) 1.38 + { 1.39 + DPRINTK("Failed to allocate VMCS.\n"); 1.40 + return NULL; 1.41 + } 1.42 + 1.43 + memset(vmcs, 0, vmcs_size); /* don't remove this */ 1.44 + vmcs->vmcs_revision_id = vmcs_revision_id; 1.45 + 1.46 return vmcs; 1.47 } 1.48 1.49 -static void free_vmcs(struct vmcs_struct *vmcs) 1.50 +static void vmx_free_vmcs(struct vmcs_struct *vmcs) 1.51 { 1.52 - int order; 1.53 - 1.54 - order = get_order_from_bytes(vmcs_size); 1.55 - free_xenheap_pages(vmcs, order); 1.56 + free_xenheap_pages(vmcs, vmcs_order); 1.57 } 1.58 1.59 static void __vmx_clear_vmcs(void *info) 1.60 { 1.61 struct vcpu *v = info; 1.62 + 1.63 __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs)); 1.64 + 1.65 v->arch.hvm_vmx.active_cpu = -1; 1.66 v->arch.hvm_vmx.launched = 0; 1.67 } 1.68 @@ -128,11 +146,19 @@ void vmx_vmcs_exit(struct vcpu *v) 1.69 vcpu_unpause(v); 1.70 } 1.71 
1.72 +struct vmcs_struct *vmx_alloc_host_vmcs(void) 1.73 +{ 1.74 + return vmx_alloc_vmcs(); 1.75 +} 1.76 + 1.77 +void vmx_free_host_vmcs(struct vmcs_struct *vmcs) 1.78 +{ 1.79 + vmx_free_vmcs(vmcs); 1.80 +} 1.81 + 1.82 static inline int construct_vmcs_controls(struct arch_vmx_struct *arch_vmx) 1.83 { 1.84 int error = 0; 1.85 - void *io_bitmap_a; 1.86 - void *io_bitmap_b; 1.87 1.88 error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL, 1.89 MONITOR_PIN_BASED_EXEC_CONTROLS); 1.90 @@ -141,19 +167,8 @@ static inline int construct_vmcs_control 1.91 1.92 error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS); 1.93 1.94 - /* need to use 0x1000 instead of PAGE_SIZE */ 1.95 - io_bitmap_a = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000)); 1.96 - io_bitmap_b = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000)); 1.97 - memset(io_bitmap_a, 0xff, 0x1000); 1.98 - /* don't bother debug port access */ 1.99 - clear_bit(PC_DEBUG_PORT, io_bitmap_a); 1.100 - memset(io_bitmap_b, 0xff, 0x1000); 1.101 - 1.102 - error |= __vmwrite(IO_BITMAP_A, (u64) virt_to_maddr(io_bitmap_a)); 1.103 - error |= __vmwrite(IO_BITMAP_B, (u64) virt_to_maddr(io_bitmap_b)); 1.104 - 1.105 - arch_vmx->io_bitmap_a = io_bitmap_a; 1.106 - arch_vmx->io_bitmap_b = io_bitmap_b; 1.107 + error |= __vmwrite(IO_BITMAP_A, (u64)virt_to_maddr(arch_vmx->io_bitmap_a)); 1.108 + error |= __vmwrite(IO_BITMAP_B, (u64)virt_to_maddr(arch_vmx->io_bitmap_b)); 1.109 1.110 return error; 1.111 } 1.112 @@ -429,67 +444,52 @@ static inline int construct_vmcs_host(vo 1.113 } 1.114 1.115 /* 1.116 - * Need to extend to support full virtualization. 1.117 + * the working VMCS pointer has been set properly 1.118 + * just before entering this function. 
1.119 */ 1.120 static int construct_vmcs(struct vcpu *v, 1.121 cpu_user_regs_t *regs) 1.122 { 1.123 struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx; 1.124 int error; 1.125 - long rc; 1.126 1.127 - memset(arch_vmx, 0, sizeof(struct arch_vmx_struct)); 1.128 - 1.129 - spin_lock_init(&arch_vmx->vmcs_lock); 1.130 - 1.131 - /* 1.132 - * Create a new VMCS 1.133 - */ 1.134 - if (!(arch_vmx->vmcs = vmx_alloc_vmcs())) { 1.135 - printk("Failed to create a new VMCS\n"); 1.136 - return -ENOMEM; 1.137 - } 1.138 - 1.139 - __vmx_clear_vmcs(v); 1.140 - vmx_load_vmcs(v); 1.141 - 1.142 - if ((error = construct_vmcs_controls(arch_vmx))) { 1.143 - printk("construct_vmcs: construct_vmcs_controls failed\n"); 1.144 - rc = -EINVAL; 1.145 - goto err_out; 1.146 + if ( (error = construct_vmcs_controls(arch_vmx)) ) { 1.147 + printk("construct_vmcs: construct_vmcs_controls failed.\n"); 1.148 + return error; 1.149 } 1.150 1.151 /* host selectors */ 1.152 - if ((error = construct_vmcs_host())) { 1.153 - printk("construct_vmcs: construct_vmcs_host failed\n"); 1.154 - rc = -EINVAL; 1.155 - goto err_out; 1.156 + if ( (error = construct_vmcs_host()) ) { 1.157 + printk("construct_vmcs: construct_vmcs_host failed.\n"); 1.158 + return error; 1.159 } 1.160 1.161 /* guest selectors */ 1.162 - if ((error = construct_init_vmcs_guest(regs))) { 1.163 - printk("construct_vmcs: construct_vmcs_guest failed\n"); 1.164 - rc = -EINVAL; 1.165 - goto err_out; 1.166 + if ( (error = construct_init_vmcs_guest(regs)) ) { 1.167 + printk("construct_vmcs: construct_vmcs_guest failed.\n"); 1.168 + return error; 1.169 + } 1.170 + 1.171 + if ( (error = __vmwrite(EXCEPTION_BITMAP, 1.172 + MONITOR_DEFAULT_EXCEPTION_BITMAP)) ) { 1.173 + printk("construct_vmcs: setting exception bitmap failed.\n"); 1.174 + return error; 1.175 } 1.176 1.177 - if ((error |= __vmwrite(EXCEPTION_BITMAP, 1.178 - MONITOR_DEFAULT_EXCEPTION_BITMAP))) { 1.179 - printk("construct_vmcs: setting Exception bitmap failed\n"); 1.180 - rc = -EINVAL; 1.181 - 
goto err_out; 1.182 - } 1.183 + if ( regs->eflags & EF_TF ) 1.184 + error = __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB); 1.185 + else 1.186 + error = __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB); 1.187 1.188 - if (regs->eflags & EF_TF) 1.189 - __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB); 1.190 - else 1.191 - __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB); 1.192 + return error; 1.193 +} 1.194 1.195 +int vmx_create_vmcs(struct vcpu *v) 1.196 +{ 1.197 + if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL ) 1.198 + return -ENOMEM; 1.199 + __vmx_clear_vmcs(v); 1.200 return 0; 1.201 - 1.202 -err_out: 1.203 - vmx_destroy_vmcs(v); 1.204 - return rc; 1.205 } 1.206 1.207 void vmx_destroy_vmcs(struct vcpu *v) 1.208 @@ -501,14 +501,14 @@ void vmx_destroy_vmcs(struct vcpu *v) 1.209 1.210 vmx_clear_vmcs(v); 1.211 1.212 - free_vmcs(arch_vmx->vmcs); 1.213 - arch_vmx->vmcs = NULL; 1.214 + free_xenheap_pages(arch_vmx->io_bitmap_a, IO_BITMAP_ORDER); 1.215 + free_xenheap_pages(arch_vmx->io_bitmap_b, IO_BITMAP_ORDER); 1.216 1.217 - free_xenheap_pages(arch_vmx->io_bitmap_a, get_order_from_bytes(0x1000)); 1.218 arch_vmx->io_bitmap_a = NULL; 1.219 + arch_vmx->io_bitmap_b = NULL; 1.220 1.221 - free_xenheap_pages(arch_vmx->io_bitmap_b, get_order_from_bytes(0x1000)); 1.222 - arch_vmx->io_bitmap_b = NULL; 1.223 + vmx_free_vmcs(arch_vmx->vmcs); 1.224 + arch_vmx->vmcs = NULL; 1.225 } 1.226 1.227 void vm_launch_fail(unsigned long eflags) 1.228 @@ -547,19 +547,20 @@ void arch_vmx_do_resume(struct vcpu *v) 1.229 1.230 void arch_vmx_do_launch(struct vcpu *v) 1.231 { 1.232 - int error; 1.233 cpu_user_regs_t *regs = ¤t->arch.guest_context.user_regs; 1.234 1.235 - error = construct_vmcs(v, regs); 1.236 - if ( error < 0 ) 1.237 + vmx_load_vmcs(v); 1.238 + 1.239 + if ( construct_vmcs(v, regs) < 0 ) 1.240 { 1.241 - if (v->vcpu_id == 0) { 1.242 - printk("Failed to construct a new VMCS for BSP.\n"); 1.243 + if ( v->vcpu_id == 0 ) { 1.244 + printk("Failed to construct VMCS 
for BSP.\n"); 1.245 } else { 1.246 - printk("Failed to construct a new VMCS for AP %d\n", v->vcpu_id); 1.247 + printk("Failed to construct VMCS for AP %d.\n", v->vcpu_id); 1.248 } 1.249 domain_crash_synchronous(); 1.250 } 1.251 + 1.252 vmx_do_launch(v); 1.253 reset_stack_and_jump(vmx_asm_do_vmentry); 1.254 }
2.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Wed Jul 05 10:32:33 2006 +0100 2.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Wed Jul 05 11:21:19 2006 +0100 2.3 @@ -54,34 +54,73 @@ static unsigned long trace_values[NR_CPU 2.4 static void vmx_ctxt_switch_from(struct vcpu *v); 2.5 static void vmx_ctxt_switch_to(struct vcpu *v); 2.6 2.7 -void vmx_final_setup_guest(struct vcpu *v) 2.8 +static int vmx_initialize_guest_resources(struct vcpu *v) 2.9 { 2.10 + struct domain *d = v->domain; 2.11 + struct vcpu *vc; 2.12 + void *io_bitmap_a, *io_bitmap_b; 2.13 + int rc; 2.14 + 2.15 v->arch.schedule_tail = arch_vmx_do_launch; 2.16 v->arch.ctxt_switch_from = vmx_ctxt_switch_from; 2.17 v->arch.ctxt_switch_to = vmx_ctxt_switch_to; 2.18 2.19 - if ( v->vcpu_id == 0 ) 2.20 + if ( v->vcpu_id != 0 ) 2.21 + return 1; 2.22 + 2.23 + for_each_vcpu ( d, vc ) 2.24 { 2.25 - struct domain *d = v->domain; 2.26 - struct vcpu *vc; 2.27 + /* Initialize monitor page table */ 2.28 + vc->arch.monitor_table = pagetable_null(); 2.29 + 2.30 + memset(&vc->arch.hvm_vmx, 0, sizeof(struct arch_vmx_struct)); 2.31 2.32 - /* Initialize monitor page table */ 2.33 - for_each_vcpu(d, vc) 2.34 - vc->arch.monitor_table = pagetable_null(); 2.35 + if ( (rc = vmx_create_vmcs(vc)) != 0 ) 2.36 + { 2.37 + DPRINTK("Failed to create VMCS for vcpu %d: err=%d.\n", 2.38 + vc->vcpu_id, rc); 2.39 + return 0; 2.40 + } 2.41 + 2.42 + spin_lock_init(&vc->arch.hvm_vmx.vmcs_lock); 2.43 + 2.44 + if ( (io_bitmap_a = alloc_xenheap_pages(IO_BITMAP_ORDER)) == NULL ) 2.45 + { 2.46 + DPRINTK("Failed to allocate io bitmap b for vcpu %d.\n", 2.47 + vc->vcpu_id); 2.48 + return 0; 2.49 + } 2.50 2.51 - /* 2.52 - * Required to do this once per domain 2.53 - * XXX todo: add a seperate function to do these. 
2.54 - */ 2.55 - memset(&d->shared_info->evtchn_mask[0], 0xff, 2.56 - sizeof(d->shared_info->evtchn_mask)); 2.57 + if ( (io_bitmap_b = alloc_xenheap_pages(IO_BITMAP_ORDER)) == NULL ) 2.58 + { 2.59 + DPRINTK("Failed to allocate io bitmap b for vcpu %d.\n", 2.60 + vc->vcpu_id); 2.61 + return 0; 2.62 + } 2.63 + 2.64 + memset(io_bitmap_a, 0xff, 0x1000); 2.65 + memset(io_bitmap_b, 0xff, 0x1000); 2.66 + 2.67 + /* don't bother debug port access */ 2.68 + clear_bit(PC_DEBUG_PORT, io_bitmap_a); 2.69 2.70 - /* Put the domain in shadow mode even though we're going to be using 2.71 - * the shared 1:1 page table initially. It shouldn't hurt */ 2.72 - shadow_mode_enable(d, 2.73 - SHM_enable|SHM_refcounts| 2.74 - SHM_translate|SHM_external|SHM_wr_pt_pte); 2.75 + vc->arch.hvm_vmx.io_bitmap_a = io_bitmap_a; 2.76 + vc->arch.hvm_vmx.io_bitmap_b = io_bitmap_b; 2.77 } 2.78 + 2.79 + /* 2.80 + * Required to do this once per domain XXX todo: add a seperate function 2.81 + * to do these. 2.82 + */ 2.83 + memset(&d->shared_info->evtchn_mask[0], 0xff, 2.84 + sizeof(d->shared_info->evtchn_mask)); 2.85 + 2.86 + /* Put the domain in shadow mode even though we're going to be using 2.87 + * the shared 1:1 page table initially. 
It shouldn't hurt */ 2.88 + shadow_mode_enable( 2.89 + d, SHM_enable|SHM_refcounts|SHM_translate|SHM_external|SHM_wr_pt_pte); 2.90 + 2.91 + return 1; 2.92 } 2.93 2.94 static void vmx_relinquish_guest_resources(struct domain *d) 2.95 @@ -90,9 +129,9 @@ static void vmx_relinquish_guest_resourc 2.96 2.97 for_each_vcpu ( d, v ) 2.98 { 2.99 + vmx_destroy_vmcs(v); 2.100 if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) ) 2.101 continue; 2.102 - vmx_destroy_vmcs(v); 2.103 free_monitor_pagetable(v); 2.104 kill_timer(&v->arch.hvm_vmx.hlt_timer); 2.105 if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) ) 2.106 @@ -444,12 +483,6 @@ void stop_vmx(void) 2.107 __vmxoff(); 2.108 } 2.109 2.110 -int vmx_initialize_guest_resources(struct vcpu *v) 2.111 -{ 2.112 - vmx_final_setup_guest(v); 2.113 - return 1; 2.114 -} 2.115 - 2.116 void vmx_migrate_timers(struct vcpu *v) 2.117 { 2.118 struct periodic_time *pt = &(v->domain->arch.hvm_domain.pl_time.periodic_tm); 2.119 @@ -638,58 +671,61 @@ static int check_vmx_controls(u32 ctrls, 2.120 2.121 int start_vmx(void) 2.122 { 2.123 + u32 eax, edx; 2.124 struct vmcs_struct *vmcs; 2.125 - u32 ecx; 2.126 - u32 eax, edx; 2.127 - u64 phys_vmcs; /* debugging */ 2.128 2.129 /* 2.130 * Xen does not fill x86_capability words except 0. 
2.131 */ 2.132 - ecx = cpuid_ecx(1); 2.133 - boot_cpu_data.x86_capability[4] = ecx; 2.134 + boot_cpu_data.x86_capability[4] = cpuid_ecx(1); 2.135 2.136 if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability))) 2.137 return 0; 2.138 2.139 rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx); 2.140 2.141 - if (eax & IA32_FEATURE_CONTROL_MSR_LOCK) { 2.142 - if ((eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0) { 2.143 + if ( eax & IA32_FEATURE_CONTROL_MSR_LOCK ) 2.144 + { 2.145 + if ( (eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0 ) 2.146 + { 2.147 printk("VMX disabled by Feature Control MSR.\n"); 2.148 return 0; 2.149 } 2.150 } 2.151 - else { 2.152 + else 2.153 + { 2.154 wrmsr(IA32_FEATURE_CONTROL_MSR, 2.155 IA32_FEATURE_CONTROL_MSR_LOCK | 2.156 IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON, 0); 2.157 } 2.158 2.159 - if (!check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS, 2.160 - MSR_IA32_VMX_PINBASED_CTLS_MSR)) 2.161 + if ( !check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS, 2.162 + MSR_IA32_VMX_PINBASED_CTLS_MSR) ) 2.163 return 0; 2.164 - if (!check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS, 2.165 - MSR_IA32_VMX_PROCBASED_CTLS_MSR)) 2.166 + if ( !check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS, 2.167 + MSR_IA32_VMX_PROCBASED_CTLS_MSR) ) 2.168 return 0; 2.169 - if (!check_vmx_controls(MONITOR_VM_EXIT_CONTROLS, 2.170 - MSR_IA32_VMX_EXIT_CTLS_MSR)) 2.171 + if ( !check_vmx_controls(MONITOR_VM_EXIT_CONTROLS, 2.172 + MSR_IA32_VMX_EXIT_CTLS_MSR) ) 2.173 return 0; 2.174 - if (!check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS, 2.175 - MSR_IA32_VMX_ENTRY_CTLS_MSR)) 2.176 + if ( !check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS, 2.177 + MSR_IA32_VMX_ENTRY_CTLS_MSR) ) 2.178 return 0; 2.179 2.180 - set_in_cr4(X86_CR4_VMXE); /* Enable VMXE */ 2.181 + set_in_cr4(X86_CR4_VMXE); 2.182 + 2.183 + vmx_init_vmcs_config(); 2.184 2.185 - if (!(vmcs = vmx_alloc_vmcs())) { 2.186 - printk("Failed to allocate VMCS\n"); 2.187 + if ( (vmcs = vmx_alloc_host_vmcs()) == NULL ) 2.188 + { 2.189 + 
printk("Failed to allocate host VMCS\n"); 2.190 return 0; 2.191 } 2.192 2.193 - phys_vmcs = (u64) virt_to_maddr(vmcs); 2.194 - 2.195 - if (__vmxon(phys_vmcs)) { 2.196 + if ( __vmxon(virt_to_maddr(vmcs)) ) 2.197 + { 2.198 printk("VMXON failed\n"); 2.199 + vmx_free_host_vmcs(vmcs); 2.200 return 0; 2.201 } 2.202
3.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h Wed Jul 05 10:32:33 2006 +0100 3.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Wed Jul 05 11:21:19 2006 +0100 3.3 @@ -27,9 +27,7 @@ 3.4 extern int start_vmx(void); 3.5 extern void stop_vmx(void); 3.6 extern void vmcs_dump_vcpu(void); 3.7 -void vmx_final_setup_guest(struct vcpu *v); 3.8 - 3.9 -void vmx_enter_scheduler(void); 3.10 +extern void vmx_init_vmcs_config(void); 3.11 3.12 enum { 3.13 VMX_CPU_STATE_PAE_ENABLED=0, 3.14 @@ -46,8 +44,6 @@ struct vmcs_struct { 3.15 unsigned char data [0]; /* vmcs size is read from MSR */ 3.16 }; 3.17 3.18 -extern int vmcs_size; 3.19 - 3.20 enum { 3.21 VMX_INDEX_MSR_LSTAR = 0, 3.22 VMX_INDEX_MSR_STAR, 3.23 @@ -64,6 +60,10 @@ struct vmx_msr_state { 3.24 unsigned long shadow_gs; 3.25 }; 3.26 3.27 +/* io bitmap is 4KBytes in size */ 3.28 +#define IO_BITMAP_SIZE 0x1000 3.29 +#define IO_BITMAP_ORDER (get_order_from_bytes(IO_BITMAP_SIZE)) 3.30 + 3.31 struct arch_vmx_struct { 3.32 /* Virtual address of VMCS. */ 3.33 struct vmcs_struct *vmcs; 3.34 @@ -101,7 +101,10 @@ struct arch_vmx_struct { 3.35 3.36 void vmx_do_resume(struct vcpu *); 3.37 3.38 -struct vmcs_struct *vmx_alloc_vmcs(void); 3.39 +struct vmcs_struct *vmx_alloc_host_vmcs(void); 3.40 +void vmx_free_host_vmcs(struct vmcs_struct *vmcs); 3.41 + 3.42 +int vmx_create_vmcs(struct vcpu *v); 3.43 void vmx_destroy_vmcs(struct vcpu *v); 3.44 void vmx_vmcs_enter(struct vcpu *v); 3.45 void vmx_vmcs_exit(struct vcpu *v);