ia64/xen-unstable
changeset 8974:0349fb4de335
Clean up some vmx code.
Signed-off-by: Xin Li <xin.b.li@intel.com>
author   | kaf24@firebug.cl.cam.ac.uk
date     | Thu Feb 23 11:34:11 2006 +0100 (2006-02-23)
parents  | 5bf4d9a9694f
children | ef31d088adeb
files    | tools/libxc/xc_hvm_build.c tools/libxc/xc_ia64_stubs.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/shadow.c xen/arch/x86/shadow32.c
line diff
--- a/tools/libxc/xc_hvm_build.c	Thu Feb 23 11:31:01 2006 +0100
+++ b/tools/libxc/xc_hvm_build.c	Thu Feb 23 11:34:11 2006 +0100
@@ -20,7 +20,7 @@
 #define L3_PROT (_PAGE_PRESENT)
 #endif
 
-#define E820MAX 128
+#define E820MAX 128
 
 #define E820_RAM 1
 #define E820_RESERVED 2
@@ -149,7 +149,7 @@ static int set_hvm_info(int xc_handle, u
                         PAGE_SIZE,
                         PROT_READ|PROT_WRITE,
                         pfn_list[HVM_INFO_PFN]);
-
+
     if ( va_map == NULL )
         return -1;
 
@@ -365,8 +365,8 @@ int xc_hvm_build(int xc_handle,
 
     if ( !strstr(xen_caps, "hvm") )
     {
-        PERROR("CPU doesn't support HVM extensions or "
-               "the extensions are not enabled");
+        PERROR("CPU doesn't support HVM extensions or "
+               "the extensions are not enabled");
         goto error_out;
     }
 
--- a/tools/libxc/xc_ia64_stubs.c	Thu Feb 23 11:31:01 2006 +0100
+++ b/tools/libxc/xc_ia64_stubs.c	Thu Feb 23 11:34:11 2006 +0100
@@ -16,7 +16,7 @@
 #undef __IA64_UL
 #define __IA64_UL(x) ((unsigned long)(x))
 #undef __ASSEMBLY__
-
+
 unsigned long xc_ia64_fpsr_default(void)
 {
     return FPSR_DEFAULT;
--- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Feb 23 11:31:01 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Feb 23 11:34:11 2006 +0100
@@ -642,7 +642,7 @@ static void vmx_do_no_device_fault(void)
 }
 
 /* Reserved bits: [31:15], [12:11], [9], [6], [2:1] */
-#define VMX_VCPU_CPUID_L1_RESERVED 0xffff9a46
+#define VMX_VCPU_CPUID_L1_RESERVED 0xffff9a46
 
 static void vmx_vmexit_do_cpuid(unsigned long input, struct cpu_user_regs *regs)
 {
@@ -1185,8 +1185,12 @@ static int vmx_set_cr0(unsigned long val
 
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
 
-    if ((value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled) {
+    if ( (value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled )
+    {
+        unsigned long cr4;
+
         /*
+         * Trying to enable guest paging.
          * The guest CR3 must be pointing to the guest physical.
         */
         if ( !VALID_MFN(mfn = get_mfn_from_gpfn(
@@ -1198,52 +1202,51 @@ static int vmx_set_cr0(unsigned long val
         }
 
 #if defined(__x86_64__)
-        if (test_bit(VMX_CPU_STATE_LME_ENABLED,
-                     &v->arch.hvm_vmx.cpu_state) &&
-            !test_bit(VMX_CPU_STATE_PAE_ENABLED,
-                      &v->arch.hvm_vmx.cpu_state)){
-            HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n");
+        if ( test_bit(VMX_CPU_STATE_LME_ENABLED,
+                      &v->arch.hvm_vmx.cpu_state) &&
+             !test_bit(VMX_CPU_STATE_PAE_ENABLED,
+                       &v->arch.hvm_vmx.cpu_state) )
+        {
+            HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enabled\n");
             vmx_inject_exception(v, TRAP_gp_fault, 0);
         }
-        if (test_bit(VMX_CPU_STATE_LME_ENABLED,
-                     &v->arch.hvm_vmx.cpu_state)){
-            /* Here the PAE is should to be opened */
-            HVM_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode\n");
+
+        if ( test_bit(VMX_CPU_STATE_LME_ENABLED,
+                      &v->arch.hvm_vmx.cpu_state) )
+        {
+            /* Here the PAE is should be opened */
+            HVM_DBG_LOG(DBG_LEVEL_1, "Enable long mode\n");
             set_bit(VMX_CPU_STATE_LMA_ENABLED,
                     &v->arch.hvm_vmx.cpu_state);
+
             __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
             vm_entry_value |= VM_ENTRY_CONTROLS_IA32E_MODE;
             __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
 
-#if CONFIG_PAGING_LEVELS >= 4
-            if(!shadow_set_guest_paging_levels(v->domain, 4)) {
+            if ( !shadow_set_guest_paging_levels(v->domain, 4) ) {
                 printk("Unsupported guest paging levels\n");
                 domain_crash_synchronous(); /* need to take a clean path */
             }
-#endif
         }
         else
 #endif  /* __x86_64__ */
         {
 #if CONFIG_PAGING_LEVELS >= 3
-            if(!shadow_set_guest_paging_levels(v->domain, 2)) {
+            if ( !shadow_set_guest_paging_levels(v->domain, 2) ) {
                 printk("Unsupported guest paging levels\n");
                 domain_crash_synchronous(); /* need to take a clean path */
             }
 #endif
         }
 
+        /* update CR4's PAE if needed */
+        __vmread(GUEST_CR4, &cr4);
+        if ( (!(cr4 & X86_CR4_PAE)) &&
+             test_bit(VMX_CPU_STATE_PAE_ENABLED,
+                      &v->arch.hvm_vmx.cpu_state) )
         {
-            unsigned long crn;
-            /* update CR4's PAE if needed */
-            __vmread(GUEST_CR4, &crn);
-            if ( (!(crn & X86_CR4_PAE)) &&
-                 test_bit(VMX_CPU_STATE_PAE_ENABLED,
-                          &v->arch.hvm_vmx.cpu_state) )
-            {
-                HVM_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n");
-                __vmwrite(GUEST_CR4, crn | X86_CR4_PAE);
-            }
+            HVM_DBG_LOG(DBG_LEVEL_1, "enable PAE in cr4\n");
+            __vmwrite(GUEST_CR4, cr4 | X86_CR4_PAE);
         }
 
         /*
@@ -1263,8 +1266,8 @@ static int vmx_set_cr0(unsigned long val
                     v->arch.hvm_vmx.cpu_cr3, mfn);
     }
 
-    if(!((value & X86_CR0_PE) && (value & X86_CR0_PG)) && paging_enabled)
-        if(v->arch.hvm_vmx.cpu_cr3) {
+    if ( !((value & X86_CR0_PE) && (value & X86_CR0_PG)) && paging_enabled )
+        if ( v->arch.hvm_vmx.cpu_cr3 ) {
            put_page(mfn_to_page(get_mfn_from_gpfn(
                 v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)));
             v->arch.guest_table = mk_pagetable(0);
@@ -1275,7 +1278,8 @@ static int vmx_set_cr0(unsigned long val
      * real-mode by performing a world switch to VMXAssist whenever
      * a partition disables the CR0.PE bit.
      */
-    if ((value & X86_CR0_PE) == 0) {
+    if ( (value & X86_CR0_PE) == 0 )
+    {
         if ( value & X86_CR0_PG ) {
             /* inject GP here */
             vmx_inject_exception(v, TRAP_gp_fault, 0);
@@ -1285,8 +1289,9 @@ static int vmx_set_cr0(unsigned long val
          * Disable paging here.
          * Same to PE == 1 && PG == 0
          */
-        if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
-                     &v->arch.hvm_vmx.cpu_state)){
+        if ( test_bit(VMX_CPU_STATE_LMA_ENABLED,
+                      &v->arch.hvm_vmx.cpu_state) )
+        {
             clear_bit(VMX_CPU_STATE_LMA_ENABLED,
                       &v->arch.hvm_vmx.cpu_state);
             __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
@@ -1296,19 +1301,21 @@ static int vmx_set_cr0(unsigned long val
         }
 
         clear_all_shadow_status(v->domain);
-        if (vmx_assist(v, VMX_ASSIST_INVOKE)) {
+        if ( vmx_assist(v, VMX_ASSIST_INVOKE) ) {
             set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &v->arch.hvm_vmx.cpu_state);
             __vmread(GUEST_RIP, &eip);
             HVM_DBG_LOG(DBG_LEVEL_1,
                         "Transfering control to vmxassist %%eip 0x%lx\n", eip);
             return 0; /* do not update eip! */
         }
-    } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
-                        &v->arch.hvm_vmx.cpu_state)) {
+    } else if ( test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
+                         &v->arch.hvm_vmx.cpu_state) )
+    {
         __vmread(GUEST_RIP, &eip);
         HVM_DBG_LOG(DBG_LEVEL_1,
                     "Enabling CR0.PE at %%eip 0x%lx\n", eip);
-        if (vmx_assist(v, VMX_ASSIST_RESTORE)) {
+        if ( vmx_assist(v, VMX_ASSIST_RESTORE) )
+        {
             clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
                       &v->arch.hvm_vmx.cpu_state);
             __vmread(GUEST_RIP, &eip);
@@ -1438,15 +1445,13 @@ static int mov_to_cr(int gp, int cr, str
     }
     case 4: /* CR4 */
     {
-        unsigned long old_cr4;
+        __vmread(CR4_READ_SHADOW, &old_cr);
 
-        __vmread(CR4_READ_SHADOW, &old_cr4);
-
-        if ( value & X86_CR4_PAE && !(old_cr4 & X86_CR4_PAE) )
+        if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
         {
             set_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
 
-            if ( vmx_pgbit_test(v) ) 
+            if ( vmx_pgbit_test(v) )
             {
                 /* The guest is 32 bit. */
 #if CONFIG_PAGING_LEVELS >= 4
@@ -1460,7 +1465,7 @@ static int mov_to_cr(int gp, int cr, str
 
                 if ( !VALID_MFN(mfn = get_mfn_from_gpfn(
                     v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
-                    !get_page(mfn_to_page(mfn), v->domain) )
+                     !get_page(mfn_to_page(mfn), v->domain) )
                 {
                     printk("Invalid CR3 value = %lx", v->arch.hvm_vmx.cpu_cr3);
                     domain_crash_synchronous(); /* need to take a clean path */
@@ -1489,12 +1494,12 @@ static int mov_to_cr(int gp, int cr, str
                 HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
                             v->arch.hvm_vmx.cpu_cr3, mfn);
 #endif
-            } 
+            }
             else
             {
                 /* The guest is 64 bit. */
 #if CONFIG_PAGING_LEVELS >= 4
-                if ( !shadow_set_guest_paging_levels(v->domain, 4) ) 
+                if ( !shadow_set_guest_paging_levels(v->domain, 4) )
                 {
                     printk("Unsupported guest paging levels\n");
                     domain_crash_synchronous(); /* need to take a clean path */
@@ -1512,7 +1517,6 @@ static int mov_to_cr(int gp, int cr, str
             clear_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
         }
 
-        __vmread(CR4_READ_SHADOW, &old_cr);
         __vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
         __vmwrite(CR4_READ_SHADOW, value);
 
--- a/xen/arch/x86/shadow.c	Thu Feb 23 11:31:01 2006 +0100
+++ b/xen/arch/x86/shadow.c	Thu Feb 23 11:34:11 2006 +0100
@@ -3942,9 +3942,7 @@ int shadow_direct_map_fault(unsigned lon
      * on handling the #PF as such.
      */
     if ( (mfn = get_mfn_from_gpfn(vpa >> PAGE_SHIFT)) == INVALID_MFN )
-    {
-        goto fail;
-    }
+        return 0;
 
     shadow_lock(d);
 
@@ -3994,9 +3992,6 @@ int shadow_direct_map_fault(unsigned lon
     shadow_unlock(d);
     return EXCRET_fault_fixed;
 
-fail:
-    return 0;
-
 nomem:
     shadow_direct_map_clean(d);
     domain_crash_synchronous();
--- a/xen/arch/x86/shadow32.c	Thu Feb 23 11:31:01 2006 +0100
+++ b/xen/arch/x86/shadow32.c	Thu Feb 23 11:34:11 2006 +0100
@@ -1039,9 +1039,7 @@ int shadow_direct_map_fault(unsigned lon
      * on handling the #PF as such.
      */
     if ( (mfn = get_mfn_from_gpfn(vpa >> PAGE_SHIFT)) == INVALID_MFN )
-    {
-        goto fail;
-    }
+        return 0;
 
     shadow_lock(d);
 
@@ -1078,9 +1076,6 @@ int shadow_direct_map_fault(unsigned lon
     shadow_unlock(d);
     return EXCRET_fault_fixed;
 
-fail:
-    return 0;
-
 nomem:
     shadow_direct_map_clean(d);
     domain_crash_synchronous();