ia64/xen-unstable
changeset 7265:4b9c9b85b3a5
Clean up file vmx.c:
1) change d to v.
2) remove trailing spaces.
Signed-off-by: Xin Li <xin.b.li@intel.com>
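The rename in point 1) matches the usual Xen convention: 'v' names a struct vcpu pointer and 'd' names a struct domain pointer, so a vcpu variable called 'd' is easy to misread next to code that also handles a domain. A minimal sketch of that convention follows (illustrative only, not taken from the patch; it assumes the struct vcpu/struct domain definitions from Xen's xen/sched.h header and the 'current' vcpu macro):

    #include <xen/sched.h>

    static void naming_example(void)
    {
        struct vcpu   *v = current;      /* before this patch such a pointer was often named 'd' */
        struct domain *d = v->domain;    /* 'd' is now unambiguously the domain */

        if ( d->max_pages == 0 )         /* hypothetical use of a domain field */
            return;
    }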
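Point 2) is a whitespace-only cleanup. A hypothetical stand-alone helper that performs the same kind of cleanup is sketched below (plain C, not part of the changeset); it copies stdin to stdout with trailing spaces and tabs stripped from each line:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[4096];

        while ( fgets(line, sizeof(line), stdin) != NULL )
        {
            size_t len = strlen(line);
            int had_newline = (len > 0 && line[len - 1] == '\n');

            if ( had_newline )
                line[--len] = '\0';
            /* Drop trailing spaces and tabs, then restore the newline. */
            while ( len > 0 && (line[len - 1] == ' ' || line[len - 1] == '\t') )
                line[--len] = '\0';

            fputs(line, stdout);
            if ( had_newline )
                fputc('\n', stdout);
        }
        return 0;
    }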
author    kaf24@firebug.cl.cam.ac.uk
date      Fri Oct 07 15:49:33 2005 +0100 (2005-10-07)
parents   409cea2432fc
children  402b5eb85905
files     xen/arch/x86/vmx.c
line diff
1.1 --- a/xen/arch/x86/vmx.c Fri Oct 07 14:47:45 2005 +0100 1.2 +++ b/xen/arch/x86/vmx.c Fri Oct 07 15:49:33 2005 +0100 1.3 @@ -69,7 +69,7 @@ static u32 msr_data_index[VMX_MSR_COUNT] 1.4 /* 1.5 * To avoid MSR save/restore at every VM exit/entry time, we restore 1.6 * the x86_64 specific MSRs at domain switch time. Since those MSRs are 1.7 - * are not modified once set for generic domains, we don't save them, 1.8 + * are not modified once set for generic domains, we don't save them, 1.9 * but simply reset them to the values set at percpu_traps_init(). 1.10 */ 1.11 void vmx_load_msrs(struct vcpu *n) 1.12 @@ -160,13 +160,13 @@ static inline int long_mode_do_msr_read( 1.13 1.14 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs) 1.15 { 1.16 - u64 msr_content = regs->eax | ((u64)regs->edx << 32); 1.17 + u64 msr_content = regs->eax | ((u64)regs->edx << 32); 1.18 struct vcpu *vc = current; 1.19 struct msr_state * msr = &vc->arch.arch_vmx.msr_content; 1.20 - struct msr_state * host_state = 1.21 + struct msr_state * host_state = 1.22 &percpu_msr[smp_processor_id()]; 1.23 1.24 - VMX_DBG_LOG(DBG_LEVEL_1, " mode_do_msr_write msr %lx msr_content %lx\n", 1.25 + VMX_DBG_LOG(DBG_LEVEL_1, " mode_do_msr_write msr %lx msr_content %lx\n", 1.26 regs->ecx, msr_content); 1.27 1.28 switch (regs->ecx){ 1.29 @@ -189,11 +189,11 @@ static inline int long_mode_do_msr_write 1.30 msr_content; 1.31 if (msr_content & ~(EFER_LME | EFER_LMA)){ 1.32 msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content; 1.33 - if (!test_bit(VMX_INDEX_MSR_EFER, &msr->flags)){ 1.34 + if (!test_bit(VMX_INDEX_MSR_EFER, &msr->flags)){ 1.35 rdmsrl(MSR_EFER, 1.36 host_state->msr_items[VMX_INDEX_MSR_EFER]); 1.37 set_bit(VMX_INDEX_MSR_EFER, &host_state->flags); 1.38 - set_bit(VMX_INDEX_MSR_EFER, &msr->flags); 1.39 + set_bit(VMX_INDEX_MSR_EFER, &msr->flags); 1.40 wrmsrl(MSR_EFER, msr_content); 1.41 } 1.42 } 1.43 @@ -209,7 +209,7 @@ static inline int long_mode_do_msr_write 1.44 } 1.45 if (regs->ecx == MSR_FS_BASE) 1.46 __vmwrite(GUEST_FS_BASE, msr_content); 1.47 - else 1.48 + else 1.49 __vmwrite(GUEST_GS_BASE, msr_content); 1.50 break; 1.51 1.52 @@ -231,14 +231,14 @@ static inline int long_mode_do_msr_write 1.53 } 1.54 1.55 void 1.56 -vmx_restore_msrs(struct vcpu *d) 1.57 +vmx_restore_msrs(struct vcpu *v) 1.58 { 1.59 int i = 0; 1.60 struct msr_state *guest_state; 1.61 struct msr_state *host_state; 1.62 unsigned long guest_flags ; 1.63 1.64 - guest_state = &d->arch.arch_vmx.msr_content;; 1.65 + guest_state = &v->arch.arch_vmx.msr_content;; 1.66 host_state = &percpu_msr[smp_processor_id()]; 1.67 1.68 wrmsrl(MSR_SHADOW_GS_BASE, guest_state->shadow_gs); 1.69 @@ -274,13 +274,13 @@ extern long do_block(void); 1.70 void do_nmi(struct cpu_user_regs *, unsigned long); 1.71 1.72 static int check_vmx_controls(ctrls, msr) 1.73 -{ 1.74 - u32 vmx_msr_low, vmx_msr_high; 1.75 +{ 1.76 + u32 vmx_msr_low, vmx_msr_high; 1.77 1.78 rdmsr(msr, vmx_msr_low, vmx_msr_high); 1.79 if (ctrls < vmx_msr_low || ctrls > vmx_msr_high) { 1.80 printk("Insufficient VMX capability 0x%x, " 1.81 - "msr=0x%x,low=0x%8x,high=0x%x\n", 1.82 + "msr=0x%x,low=0x%8x,high=0x%x\n", 1.83 ctrls, msr, vmx_msr_low, vmx_msr_high); 1.84 return 0; 1.85 } 1.86 @@ -302,7 +302,7 @@ int start_vmx(void) 1.87 1.88 if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability))) 1.89 return 0; 1.90 - 1.91 + 1.92 rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx); 1.93 1.94 if (eax & IA32_FEATURE_CONTROL_MSR_LOCK) { 1.95 @@ -312,28 +312,28 @@ int start_vmx(void) 1.96 } 1.97 } 1.98 else { 1.99 - 
wrmsr(IA32_FEATURE_CONTROL_MSR, 1.100 + wrmsr(IA32_FEATURE_CONTROL_MSR, 1.101 IA32_FEATURE_CONTROL_MSR_LOCK | 1.102 IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON, 0); 1.103 } 1.104 1.105 - if (!check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS, 1.106 + if (!check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS, 1.107 MSR_IA32_VMX_PINBASED_CTLS_MSR)) 1.108 return 0; 1.109 - if (!check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS, 1.110 + if (!check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS, 1.111 MSR_IA32_VMX_PROCBASED_CTLS_MSR)) 1.112 return 0; 1.113 - if (!check_vmx_controls(MONITOR_VM_EXIT_CONTROLS, 1.114 + if (!check_vmx_controls(MONITOR_VM_EXIT_CONTROLS, 1.115 MSR_IA32_VMX_EXIT_CTLS_MSR)) 1.116 return 0; 1.117 - if (!check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS, 1.118 + if (!check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS, 1.119 MSR_IA32_VMX_ENTRY_CTLS_MSR)) 1.120 return 0; 1.121 1.122 set_in_cr4(X86_CR4_VMXE); /* Enable VMXE */ 1.123 1.124 if (!(vmcs = alloc_vmcs())) { 1.125 - printk("Failed to allocate VMCS\n"); 1.126 + printk("Failed to allocate VMCS\n"); 1.127 return 0; 1.128 } 1.129 1.130 @@ -364,7 +364,7 @@ void stop_vmx(void) 1.131 if ((len) < 1 || (len) > 15) \ 1.132 __vmx_bug(®s); 1.133 1.134 -static void inline __update_guest_eip(unsigned long inst_len) 1.135 +static void inline __update_guest_eip(unsigned long inst_len) 1.136 { 1.137 unsigned long current_eip; 1.138 1.139 @@ -373,7 +373,7 @@ static void inline __update_guest_eip(un 1.140 } 1.141 1.142 1.143 -static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 1.144 +static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 1.145 { 1.146 unsigned long gpa; /* FIXME: PAE */ 1.147 int result; 1.148 @@ -383,7 +383,7 @@ static int vmx_do_page_fault(unsigned lo 1.149 unsigned long eip; 1.150 1.151 __vmread(GUEST_RIP, &eip); 1.152 - VMX_DBG_LOG(DBG_LEVEL_VMMU, 1.153 + VMX_DBG_LOG(DBG_LEVEL_VMMU, 1.154 "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx", 1.155 va, eip, (unsigned long)regs->error_code); 1.156 } 1.157 @@ -425,7 +425,7 @@ static int vmx_do_page_fault(unsigned lo 1.158 static void vmx_do_no_device_fault(void) 1.159 { 1.160 unsigned long cr0; 1.161 - 1.162 + 1.163 clts(); 1.164 setup_fpu(current); 1.165 __vmread_vcpu(CR0_READ_SHADOW, &cr0); 1.166 @@ -438,14 +438,14 @@ static void vmx_do_no_device_fault(void) 1.167 } 1.168 1.169 1.170 -static void vmx_vmexit_do_cpuid(unsigned long input, struct cpu_user_regs *regs) 1.171 +static void vmx_vmexit_do_cpuid(unsigned long input, struct cpu_user_regs *regs) 1.172 { 1.173 unsigned int eax, ebx, ecx, edx; 1.174 unsigned long eip; 1.175 1.176 __vmread(GUEST_RIP, &eip); 1.177 1.178 - VMX_DBG_LOG(DBG_LEVEL_1, 1.179 + VMX_DBG_LOG(DBG_LEVEL_1, 1.180 "do_cpuid: (eax) %lx, (ebx) %lx, (ecx) %lx, (edx) %lx," 1.181 " (esi) %lx, (edi) %lx", 1.182 (unsigned long)regs->eax, (unsigned long)regs->ebx, 1.183 @@ -460,8 +460,8 @@ static void vmx_vmexit_do_cpuid(unsigned 1.184 clear_bit(X86_FEATURE_PAE, &edx); 1.185 clear_bit(X86_FEATURE_PSE36, &edx); 1.186 #else 1.187 - struct vcpu *d = current; 1.188 - if (d->domain->arch.ops->guest_paging_levels == PAGING_L2) 1.189 + struct vcpu *v = current; 1.190 + if (v->domain->arch.ops->guest_paging_levels == PAGING_L2) 1.191 { 1.192 clear_bit(X86_FEATURE_PSE, &edx); 1.193 clear_bit(X86_FEATURE_PAE, &edx); 1.194 @@ -478,7 +478,7 @@ static void vmx_vmexit_do_cpuid(unsigned 1.195 regs->ecx = (unsigned long) ecx; 1.196 regs->edx = (unsigned long) edx; 1.197 1.198 - VMX_DBG_LOG(DBG_LEVEL_1, 1.199 + VMX_DBG_LOG(DBG_LEVEL_1, 
1.200 "vmx_vmexit_do_cpuid: eip: %lx, input: %lx, out:eax=%x, ebx=%x, ecx=%x, edx=%x", 1.201 eip, input, eax, ebx, ecx, edx); 1.202 1.203 @@ -498,7 +498,7 @@ static void vmx_dr_access (unsigned long 1.204 1.205 reg = exit_qualification & DEBUG_REG_ACCESS_NUM; 1.206 1.207 - VMX_DBG_LOG(DBG_LEVEL_1, 1.208 + VMX_DBG_LOG(DBG_LEVEL_1, 1.209 "vmx_dr_access : eip=%lx, reg=%d, exit_qualification = %lx", 1.210 eip, reg, exit_qualification); 1.211 1.212 @@ -511,16 +511,16 @@ static void vmx_dr_access (unsigned long 1.213 CASE_GET_REG_P(ESI, esi); 1.214 CASE_GET_REG_P(EDI, edi); 1.215 case REG_ESP: 1.216 - break; 1.217 + break; 1.218 default: 1.219 __vmx_bug(regs); 1.220 } 1.221 - 1.222 + 1.223 switch (exit_qualification & DEBUG_REG_ACCESS_TYPE) { 1.224 - case TYPE_MOV_TO_DR: 1.225 + case TYPE_MOV_TO_DR: 1.226 /* don't need to check the range */ 1.227 if (reg != REG_ESP) 1.228 - v->arch.guest_context.debugreg[reg] = *reg_p; 1.229 + v->arch.guest_context.debugreg[reg] = *reg_p; 1.230 else { 1.231 unsigned long value; 1.232 __vmread(GUEST_RSP, &value); 1.233 @@ -541,7 +541,7 @@ static void vmx_dr_access (unsigned long 1.234 * Invalidate the TLB for va. Invalidate the shadow page corresponding 1.235 * the address va. 1.236 */ 1.237 -static void vmx_vmexit_do_invlpg(unsigned long va) 1.238 +static void vmx_vmexit_do_invlpg(unsigned long va) 1.239 { 1.240 unsigned long eip; 1.241 struct vcpu *v = current; 1.242 @@ -656,8 +656,8 @@ void send_pio_req(struct cpu_user_regs * 1.243 vmx_wait_io(); 1.244 } 1.245 1.246 -static void vmx_io_instruction(struct cpu_user_regs *regs, 1.247 - unsigned long exit_qualification, unsigned long inst_len) 1.248 +static void vmx_io_instruction(struct cpu_user_regs *regs, 1.249 + unsigned long exit_qualification, unsigned long inst_len) 1.250 { 1.251 struct mi_per_cpu_info *mpcip; 1.252 unsigned long eip, cs, eflags; 1.253 @@ -673,7 +673,7 @@ static void vmx_io_instruction(struct cp 1.254 __vmread(GUEST_RFLAGS, &eflags); 1.255 vm86 = eflags & X86_EFLAGS_VM ? 1 : 0; 1.256 1.257 - VMX_DBG_LOG(DBG_LEVEL_1, 1.258 + VMX_DBG_LOG(DBG_LEVEL_1, 1.259 "vmx_io_instruction: vm86 %d, eip=%lx:%lx, " 1.260 "exit_qualification = %lx", 1.261 vm86, cs, eip, exit_qualification); 1.262 @@ -770,7 +770,7 @@ vmx_copy(void *buf, unsigned long laddr, 1.263 } 1.264 1.265 int 1.266 -vmx_world_save(struct vcpu *d, struct vmx_assist_context *c) 1.267 +vmx_world_save(struct vcpu *v, struct vmx_assist_context *c) 1.268 { 1.269 unsigned long inst_len; 1.270 int error = 0; 1.271 @@ -782,7 +782,7 @@ vmx_world_save(struct vcpu *d, struct vm 1.272 error |= __vmread(GUEST_RFLAGS, &c->eflags); 1.273 1.274 error |= __vmread(CR0_READ_SHADOW, &c->cr0); 1.275 - c->cr3 = d->arch.arch_vmx.cpu_cr3; 1.276 + c->cr3 = v->arch.arch_vmx.cpu_cr3; 1.277 error |= __vmread(CR4_READ_SHADOW, &c->cr4); 1.278 1.279 error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit); 1.280 @@ -835,7 +835,7 @@ vmx_world_save(struct vcpu *d, struct vm 1.281 } 1.282 1.283 int 1.284 -vmx_world_restore(struct vcpu *d, struct vmx_assist_context *c) 1.285 +vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c) 1.286 { 1.287 unsigned long mfn, old_cr4; 1.288 int error = 0; 1.289 @@ -846,45 +846,45 @@ vmx_world_restore(struct vcpu *d, struct 1.290 1.291 error |= __vmwrite(CR0_READ_SHADOW, c->cr0); 1.292 1.293 - if (!vmx_paging_enabled(d)) { 1.294 + if (!vmx_paging_enabled(v)) { 1.295 VMX_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. 
use phys table"); 1.296 - __vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table)); 1.297 + __vmwrite(GUEST_CR3, pagetable_get_paddr(v->domain->arch.phys_table)); 1.298 goto skip_cr3; 1.299 } 1.300 1.301 - if (c->cr3 == d->arch.arch_vmx.cpu_cr3) { 1.302 - /* 1.303 - * This is simple TLB flush, implying the guest has 1.304 + if (c->cr3 == v->arch.arch_vmx.cpu_cr3) { 1.305 + /* 1.306 + * This is simple TLB flush, implying the guest has 1.307 * removed some translation or changed page attributes. 1.308 * We simply invalidate the shadow. 1.309 */ 1.310 mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT); 1.311 - if (mfn != pagetable_get_pfn(d->arch.guest_table)) { 1.312 + if (mfn != pagetable_get_pfn(v->arch.guest_table)) { 1.313 printk("Invalid CR3 value=%x", c->cr3); 1.314 domain_crash_synchronous(); 1.315 return 0; 1.316 } 1.317 - shadow_sync_all(d->domain); 1.318 + shadow_sync_all(v->domain); 1.319 } else { 1.320 /* 1.321 * If different, make a shadow. Check if the PDBR is valid 1.322 * first. 1.323 */ 1.324 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %x", c->cr3); 1.325 - if ((c->cr3 >> PAGE_SHIFT) > d->domain->max_pages) { 1.326 + if ((c->cr3 >> PAGE_SHIFT) > v->domain->max_pages) { 1.327 printk("Invalid CR3 value=%x", c->cr3); 1.328 - domain_crash_synchronous(); 1.329 + domain_crash_synchronous(); 1.330 return 0; 1.331 } 1.332 mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT); 1.333 - d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT); 1.334 - update_pagetables(d); 1.335 - /* 1.336 + v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT); 1.337 + update_pagetables(v); 1.338 + /* 1.339 * arch.shadow_table should now hold the next CR3 for shadow 1.340 */ 1.341 - d->arch.arch_vmx.cpu_cr3 = c->cr3; 1.342 + v->arch.arch_vmx.cpu_cr3 = c->cr3; 1.343 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3); 1.344 - __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table)); 1.345 + __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.shadow_table)); 1.346 } 1.347 1.348 skip_cr3: 1.349 @@ -945,7 +945,7 @@ vmx_world_restore(struct vcpu *d, struct 1.350 enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE }; 1.351 1.352 int 1.353 -vmx_assist(struct vcpu *d, int mode) 1.354 +vmx_assist(struct vcpu *v, int mode) 1.355 { 1.356 struct vmx_assist_context c; 1.357 u32 magic; 1.358 @@ -969,7 +969,7 @@ vmx_assist(struct vcpu *d, int mode) 1.359 if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN)) 1.360 goto error; 1.361 if (cp != 0) { 1.362 - if (!vmx_world_save(d, &c)) 1.363 + if (!vmx_world_save(v, &c)) 1.364 goto error; 1.365 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_OUT)) 1.366 goto error; 1.367 @@ -981,7 +981,7 @@ vmx_assist(struct vcpu *d, int mode) 1.368 if (cp != 0) { 1.369 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_IN)) 1.370 goto error; 1.371 - if (!vmx_world_restore(d, &c)) 1.372 + if (!vmx_world_restore(v, &c)) 1.373 goto error; 1.374 return 1; 1.375 } 1.376 @@ -998,7 +998,7 @@ vmx_assist(struct vcpu *d, int mode) 1.377 if (cp != 0) { 1.378 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_IN)) 1.379 goto error; 1.380 - if (!vmx_world_restore(d, &c)) 1.381 + if (!vmx_world_restore(v, &c)) 1.382 goto error; 1.383 return 1; 1.384 } 1.385 @@ -1007,21 +1007,21 @@ vmx_assist(struct vcpu *d, int mode) 1.386 1.387 error: 1.388 printf("Failed to transfer to vmxassist\n"); 1.389 - domain_crash_synchronous(); 1.390 + domain_crash_synchronous(); 1.391 return 0; 1.392 } 1.393 1.394 static int vmx_set_cr0(unsigned long value) 1.395 { 1.396 - struct vcpu *d = current; 1.397 + struct vcpu *v = 
current; 1.398 unsigned long mfn; 1.399 unsigned long eip; 1.400 int paging_enabled; 1.401 unsigned long vm_entry_value; 1.402 - /* 1.403 + /* 1.404 * CR0: We don't want to lose PE and PG. 1.405 */ 1.406 - paging_enabled = vmx_paging_enabled(d); 1.407 + paging_enabled = vmx_paging_enabled(v); 1.408 __vmwrite(GUEST_CR0, value | X86_CR0_PE | X86_CR0_PG | X86_CR0_NE); 1.409 __vmwrite(CR0_READ_SHADOW, value); 1.410 1.411 @@ -1032,33 +1032,33 @@ static int vmx_set_cr0(unsigned long val 1.412 * The guest CR3 must be pointing to the guest physical. 1.413 */ 1.414 if ( !VALID_MFN(mfn = get_mfn_from_pfn( 1.415 - d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) || 1.416 - !get_page(pfn_to_page(mfn), d->domain) ) 1.417 + v->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) || 1.418 + !get_page(pfn_to_page(mfn), v->domain) ) 1.419 { 1.420 - printk("Invalid CR3 value = %lx", d->arch.arch_vmx.cpu_cr3); 1.421 + printk("Invalid CR3 value = %lx", v->arch.arch_vmx.cpu_cr3); 1.422 domain_crash_synchronous(); /* need to take a clean path */ 1.423 } 1.424 1.425 #if defined(__x86_64__) 1.426 if (test_bit(VMX_CPU_STATE_LME_ENABLED, 1.427 - &d->arch.arch_vmx.cpu_state) && 1.428 + &v->arch.arch_vmx.cpu_state) && 1.429 !test_bit(VMX_CPU_STATE_PAE_ENABLED, 1.430 - &d->arch.arch_vmx.cpu_state)){ 1.431 + &v->arch.arch_vmx.cpu_state)){ 1.432 VMX_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n"); 1.433 - vmx_inject_exception(d, TRAP_gp_fault, 0); 1.434 + vmx_inject_exception(v, TRAP_gp_fault, 0); 1.435 } 1.436 if (test_bit(VMX_CPU_STATE_LME_ENABLED, 1.437 - &d->arch.arch_vmx.cpu_state)){ 1.438 + &v->arch.arch_vmx.cpu_state)){ 1.439 /* Here the PAE is should to be opened */ 1.440 VMX_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode\n"); 1.441 set_bit(VMX_CPU_STATE_LMA_ENABLED, 1.442 - &d->arch.arch_vmx.cpu_state); 1.443 + &v->arch.arch_vmx.cpu_state); 1.444 __vmread(VM_ENTRY_CONTROLS, &vm_entry_value); 1.445 vm_entry_value |= VM_ENTRY_CONTROLS_IA32E_MODE; 1.446 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value); 1.447 1.448 -#if CONFIG_PAGING_LEVELS >= 4 1.449 - if(!shadow_set_guest_paging_levels(d->domain, 4)) { 1.450 +#if CONFIG_PAGING_LEVELS >= 4 1.451 + if(!shadow_set_guest_paging_levels(v->domain, 4)) { 1.452 printk("Unsupported guest paging levels\n"); 1.453 domain_crash_synchronous(); /* need to take a clean path */ 1.454 } 1.455 @@ -1067,7 +1067,7 @@ static int vmx_set_cr0(unsigned long val 1.456 else 1.457 { 1.458 #if CONFIG_PAGING_LEVELS >= 4 1.459 - if(!shadow_set_guest_paging_levels(d->domain, 2)) { 1.460 + if(!shadow_set_guest_paging_levels(v->domain, 2)) { 1.461 printk("Unsupported guest paging levels\n"); 1.462 domain_crash_synchronous(); /* need to take a clean path */ 1.463 } 1.464 @@ -1079,7 +1079,7 @@ static int vmx_set_cr0(unsigned long val 1.465 __vmread(GUEST_CR4, &crn); 1.466 if ( (!(crn & X86_CR4_PAE)) && 1.467 test_bit(VMX_CPU_STATE_PAE_ENABLED, 1.468 - &d->arch.arch_vmx.cpu_state)){ 1.469 + &v->arch.arch_vmx.cpu_state)){ 1.470 VMX_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n"); 1.471 __vmwrite(GUEST_CR4, crn | X86_CR4_PAE); 1.472 } 1.473 @@ -1087,24 +1087,24 @@ static int vmx_set_cr0(unsigned long val 1.474 /* 1.475 * Now arch.guest_table points to machine physical. 
1.476 */ 1.477 - d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT); 1.478 - update_pagetables(d); 1.479 + v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT); 1.480 + update_pagetables(v); 1.481 1.482 - VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx", 1.483 + VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx", 1.484 (unsigned long) (mfn << PAGE_SHIFT)); 1.485 1.486 - __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table)); 1.487 - /* 1.488 + __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.shadow_table)); 1.489 + /* 1.490 * arch->shadow_table should hold the next CR3 for shadow 1.491 */ 1.492 - VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx", 1.493 - d->arch.arch_vmx.cpu_cr3, mfn); 1.494 + VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx", 1.495 + v->arch.arch_vmx.cpu_cr3, mfn); 1.496 } 1.497 1.498 if(!((value & X86_CR0_PE) && (value & X86_CR0_PG)) && paging_enabled) 1.499 - if(d->arch.arch_vmx.cpu_cr3) 1.500 + if(v->arch.arch_vmx.cpu_cr3) 1.501 put_page(pfn_to_page(get_mfn_from_pfn( 1.502 - d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT))); 1.503 + v->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT))); 1.504 1.505 /* 1.506 * VMX does not implement real-mode virtualization. We emulate 1.507 @@ -1114,38 +1114,38 @@ static int vmx_set_cr0(unsigned long val 1.508 if ((value & X86_CR0_PE) == 0) { 1.509 if ( value & X86_CR0_PG ) { 1.510 /* inject GP here */ 1.511 - vmx_inject_exception(d, TRAP_gp_fault, 0); 1.512 + vmx_inject_exception(v, TRAP_gp_fault, 0); 1.513 return 0; 1.514 } else { 1.515 - /* 1.516 + /* 1.517 * Disable paging here. 1.518 * Same to PE == 1 && PG == 0 1.519 */ 1.520 if (test_bit(VMX_CPU_STATE_LMA_ENABLED, 1.521 - &d->arch.arch_vmx.cpu_state)){ 1.522 + &v->arch.arch_vmx.cpu_state)){ 1.523 clear_bit(VMX_CPU_STATE_LMA_ENABLED, 1.524 - &d->arch.arch_vmx.cpu_state); 1.525 + &v->arch.arch_vmx.cpu_state); 1.526 __vmread(VM_ENTRY_CONTROLS, &vm_entry_value); 1.527 vm_entry_value &= ~VM_ENTRY_CONTROLS_IA32E_MODE; 1.528 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value); 1.529 } 1.530 } 1.531 1.532 - if (vmx_assist(d, VMX_ASSIST_INVOKE)) { 1.533 - set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &d->arch.arch_vmx.cpu_state); 1.534 + if (vmx_assist(v, VMX_ASSIST_INVOKE)) { 1.535 + set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &v->arch.arch_vmx.cpu_state); 1.536 __vmread(GUEST_RIP, &eip); 1.537 VMX_DBG_LOG(DBG_LEVEL_1, 1.538 "Transfering control to vmxassist %%eip 0x%lx\n", eip); 1.539 return 0; /* do not update eip! 
*/ 1.540 } 1.541 } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED, 1.542 - &d->arch.arch_vmx.cpu_state)) { 1.543 + &v->arch.arch_vmx.cpu_state)) { 1.544 __vmread(GUEST_RIP, &eip); 1.545 VMX_DBG_LOG(DBG_LEVEL_1, 1.546 "Enabling CR0.PE at %%eip 0x%lx\n", eip); 1.547 - if (vmx_assist(d, VMX_ASSIST_RESTORE)) { 1.548 + if (vmx_assist(v, VMX_ASSIST_RESTORE)) { 1.549 clear_bit(VMX_CPU_STATE_ASSIST_ENABLED, 1.550 - &d->arch.arch_vmx.cpu_state); 1.551 + &v->arch.arch_vmx.cpu_state); 1.552 __vmread(GUEST_RIP, &eip); 1.553 VMX_DBG_LOG(DBG_LEVEL_1, 1.554 "Restoring to %%eip 0x%lx\n", eip); 1.555 @@ -1186,7 +1186,7 @@ static int mov_to_cr(int gp, int cr, str 1.556 { 1.557 unsigned long value; 1.558 unsigned long old_cr; 1.559 - struct vcpu *d = current; 1.560 + struct vcpu *v = current; 1.561 1.562 switch (gp) { 1.563 CASE_GET_REG(EAX, eax); 1.564 @@ -1204,82 +1204,82 @@ static int mov_to_cr(int gp, int cr, str 1.565 printk("invalid gp: %d\n", gp); 1.566 __vmx_bug(regs); 1.567 } 1.568 - 1.569 + 1.570 VMX_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value); 1.571 VMX_DBG_LOG(DBG_LEVEL_1, "current = %lx,", (unsigned long) current); 1.572 1.573 switch(cr) { 1.574 - case 0: 1.575 + case 0: 1.576 { 1.577 return vmx_set_cr0(value); 1.578 } 1.579 - case 3: 1.580 + case 3: 1.581 { 1.582 unsigned long old_base_mfn, mfn; 1.583 1.584 /* 1.585 * If paging is not enabled yet, simply copy the value to CR3. 1.586 */ 1.587 - if (!vmx_paging_enabled(d)) { 1.588 - d->arch.arch_vmx.cpu_cr3 = value; 1.589 + if (!vmx_paging_enabled(v)) { 1.590 + v->arch.arch_vmx.cpu_cr3 = value; 1.591 break; 1.592 } 1.593 - 1.594 + 1.595 /* 1.596 * We make a new one if the shadow does not exist. 1.597 */ 1.598 - if (value == d->arch.arch_vmx.cpu_cr3) { 1.599 - /* 1.600 - * This is simple TLB flush, implying the guest has 1.601 + if (value == v->arch.arch_vmx.cpu_cr3) { 1.602 + /* 1.603 + * This is simple TLB flush, implying the guest has 1.604 * removed some translation or changed page attributes. 1.605 * We simply invalidate the shadow. 1.606 */ 1.607 mfn = get_mfn_from_pfn(value >> PAGE_SHIFT); 1.608 - if (mfn != pagetable_get_pfn(d->arch.guest_table)) 1.609 + if (mfn != pagetable_get_pfn(v->arch.guest_table)) 1.610 __vmx_bug(regs); 1.611 - shadow_sync_all(d->domain); 1.612 + shadow_sync_all(v->domain); 1.613 } else { 1.614 /* 1.615 * If different, make a shadow. Check if the PDBR is valid 1.616 * first. 
1.617 */ 1.618 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value); 1.619 - if ( ((value >> PAGE_SHIFT) > d->domain->max_pages ) || 1.620 + if ( ((value >> PAGE_SHIFT) > v->domain->max_pages ) || 1.621 !VALID_MFN(mfn = get_mfn_from_pfn(value >> PAGE_SHIFT)) || 1.622 - !get_page(pfn_to_page(mfn), d->domain) ) 1.623 + !get_page(pfn_to_page(mfn), v->domain) ) 1.624 { 1.625 printk("Invalid CR3 value=%lx", value); 1.626 domain_crash_synchronous(); /* need to take a clean path */ 1.627 } 1.628 - old_base_mfn = pagetable_get_pfn(d->arch.guest_table); 1.629 - d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT); 1.630 + old_base_mfn = pagetable_get_pfn(v->arch.guest_table); 1.631 + v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT); 1.632 if (old_base_mfn) 1.633 put_page(pfn_to_page(old_base_mfn)); 1.634 - update_pagetables(d); 1.635 - /* 1.636 + update_pagetables(v); 1.637 + /* 1.638 * arch.shadow_table should now hold the next CR3 for shadow 1.639 */ 1.640 - d->arch.arch_vmx.cpu_cr3 = value; 1.641 + v->arch.arch_vmx.cpu_cr3 = value; 1.642 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", 1.643 value); 1.644 - __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table)); 1.645 + __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.shadow_table)); 1.646 } 1.647 break; 1.648 } 1.649 - case 4: 1.650 + case 4: 1.651 { 1.652 /* CR4 */ 1.653 unsigned long old_guest_cr; 1.654 1.655 __vmread(GUEST_CR4, &old_guest_cr); 1.656 if (value & X86_CR4_PAE){ 1.657 - set_bit(VMX_CPU_STATE_PAE_ENABLED, &d->arch.arch_vmx.cpu_state); 1.658 + set_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.arch_vmx.cpu_state); 1.659 } else { 1.660 if (test_bit(VMX_CPU_STATE_LMA_ENABLED, 1.661 - &d->arch.arch_vmx.cpu_state)){ 1.662 - vmx_inject_exception(d, TRAP_gp_fault, 0); 1.663 + &v->arch.arch_vmx.cpu_state)){ 1.664 + vmx_inject_exception(v, TRAP_gp_fault, 0); 1.665 } 1.666 - clear_bit(VMX_CPU_STATE_PAE_ENABLED, &d->arch.arch_vmx.cpu_state); 1.667 + clear_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.arch_vmx.cpu_state); 1.668 } 1.669 1.670 __vmread(CR4_READ_SHADOW, &old_cr); 1.671 @@ -1292,7 +1292,7 @@ static int mov_to_cr(int gp, int cr, str 1.672 * all TLB entries except global entries. 
1.673 */ 1.674 if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) { 1.675 - shadow_sync_all(d->domain); 1.676 + shadow_sync_all(v->domain); 1.677 } 1.678 break; 1.679 } 1.680 @@ -1315,12 +1315,12 @@ static int mov_to_cr(int gp, int cr, str 1.681 static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs) 1.682 { 1.683 unsigned long value; 1.684 - struct vcpu *d = current; 1.685 + struct vcpu *v = current; 1.686 1.687 if (cr != 3) 1.688 __vmx_bug(regs); 1.689 1.690 - value = (unsigned long) d->arch.arch_vmx.cpu_cr3; 1.691 + value = (unsigned long) v->arch.arch_vmx.cpu_cr3; 1.692 1.693 switch (gp) { 1.694 CASE_SET_REG(EAX, eax); 1.695 @@ -1396,7 +1396,7 @@ static inline void vmx_do_msr_read(struc 1.696 u64 msr_content = 0; 1.697 1.698 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx", 1.699 - (unsigned long)regs->ecx, (unsigned long)regs->eax, 1.700 + (unsigned long)regs->ecx, (unsigned long)regs->eax, 1.701 (unsigned long)regs->edx); 1.702 switch (regs->ecx) { 1.703 case MSR_IA32_SYSENTER_CS: 1.704 @@ -1429,7 +1429,7 @@ static inline void vmx_do_msr_write(stru 1.705 u64 msr_content; 1.706 1.707 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_write: ecx=%lx, eax=%lx, edx=%lx", 1.708 - (unsigned long)regs->ecx, (unsigned long)regs->eax, 1.709 + (unsigned long)regs->ecx, (unsigned long)regs->eax, 1.710 (unsigned long)regs->edx); 1.711 1.712 msr_content = (regs->eax & 0xFFFFFFFF) | ((u64)regs->edx << 32); 1.713 @@ -1516,7 +1516,7 @@ static inline void vmx_vmexit_do_extint( 1.714 char print_buf[BUF_SIZ]; 1.715 static int index; 1.716 1.717 -static void vmx_print_line(const char c, struct vcpu *d) 1.718 +static void vmx_print_line(const char c, struct vcpu *v) 1.719 { 1.720 1.721 if (index == MAX_LINE || c == '\n') { 1.722 @@ -1524,7 +1524,7 @@ static void vmx_print_line(const char c, 1.723 print_buf[index++] = c; 1.724 } 1.725 print_buf[index] = '\0'; 1.726 - printk("(GUEST: %u) %s\n", d->domain->domain_id, (char *) &print_buf); 1.727 + printk("(GUEST: %u) %s\n", v->domain->domain_id, (char *) &print_buf); 1.728 index = 0; 1.729 } 1.730 else 1.731 @@ -1584,7 +1584,7 @@ asmlinkage void vmx_vmexit_handler(struc 1.732 1.733 if ((error = __vmread(VM_EXIT_REASON, &exit_reason))) 1.734 __vmx_bug(®s); 1.735 - 1.736 + 1.737 perfc_incra(vmexits, exit_reason); 1.738 1.739 __vmread(IDT_VECTORING_INFO_FIELD, &idtv_info_field); 1.740 @@ -1592,14 +1592,14 @@ asmlinkage void vmx_vmexit_handler(struc 1.741 __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field); 1.742 1.743 __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len); 1.744 - if (inst_len >= 1 && inst_len <= 15) 1.745 + if (inst_len >= 1 && inst_len <= 15) 1.746 __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len); 1.747 1.748 if (idtv_info_field & 0x800) { /* valid error code */ 1.749 unsigned long error_code; 1.750 __vmread(IDT_VECTORING_ERROR_CODE, &error_code); 1.751 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); 1.752 - } 1.753 + } 1.754 1.755 VMX_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field); 1.756 } 1.757 @@ -1612,7 +1612,7 @@ asmlinkage void vmx_vmexit_handler(struc 1.758 1.759 if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) { 1.760 printk("Failed vm entry\n"); 1.761 - domain_crash_synchronous(); 1.762 + domain_crash_synchronous(); 1.763 return; 1.764 } 1.765 1.766 @@ -1628,7 +1628,7 @@ asmlinkage void vmx_vmexit_handler(struc 1.767 case EXIT_REASON_EXCEPTION_NMI: 1.768 { 1.769 /* 1.770 - * We don't set the software-interrupt exiting (INT n). 1.771 + * We don't set the software-interrupt exiting (INT n). 
1.772 * (1) We can get an exception (e.g. #PG) in the guest, or 1.773 * (2) NMI 1.774 */ 1.775 @@ -1680,17 +1680,17 @@ asmlinkage void vmx_vmexit_handler(struc 1.776 case TRAP_no_device: 1.777 { 1.778 vmx_do_no_device_fault(); 1.779 - break; 1.780 + break; 1.781 } 1.782 case TRAP_page_fault: 1.783 { 1.784 __vmread(EXIT_QUALIFICATION, &va); 1.785 __vmread(VM_EXIT_INTR_ERROR_CODE, ®s.error_code); 1.786 - 1.787 + 1.788 TRACE_VMEXIT(3,regs.error_code); 1.789 TRACE_VMEXIT(4,va); 1.790 1.791 - VMX_DBG_LOG(DBG_LEVEL_VMMU, 1.792 + VMX_DBG_LOG(DBG_LEVEL_VMMU, 1.793 "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx", 1.794 (unsigned long)regs.eax, (unsigned long)regs.ebx, 1.795 (unsigned long)regs.ecx, (unsigned long)regs.edx, 1.796 @@ -1716,11 +1716,11 @@ asmlinkage void vmx_vmexit_handler(struc 1.797 } 1.798 break; 1.799 } 1.800 - case EXIT_REASON_EXTERNAL_INTERRUPT: 1.801 + case EXIT_REASON_EXTERNAL_INTERRUPT: 1.802 vmx_vmexit_do_extint(®s); 1.803 break; 1.804 case EXIT_REASON_PENDING_INTERRUPT: 1.805 - __vmwrite(CPU_BASED_VM_EXEC_CONTROL, 1.806 + __vmwrite(CPU_BASED_VM_EXEC_CONTROL, 1.807 MONITOR_CPU_BASED_EXEC_CONTROLS); 1.808 break; 1.809 case EXIT_REASON_TASK_SWITCH: 1.810 @@ -1760,7 +1760,7 @@ asmlinkage void vmx_vmexit_handler(struc 1.811 __get_instruction_length(inst_len); 1.812 __vmread(EXIT_QUALIFICATION, &exit_qualification); 1.813 1.814 - VMX_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len =%lx, exit_qualification = %lx", 1.815 + VMX_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len =%lx, exit_qualification = %lx", 1.816 eip, inst_len, exit_qualification); 1.817 if (vmx_cr_access(exit_qualification, ®s)) 1.818 __update_guest_eip(inst_len); 1.819 @@ -1769,7 +1769,7 @@ asmlinkage void vmx_vmexit_handler(struc 1.820 break; 1.821 } 1.822 case EXIT_REASON_DR_ACCESS: 1.823 - __vmread(EXIT_QUALIFICATION, &exit_qualification); 1.824 + __vmread(EXIT_QUALIFICATION, &exit_qualification); 1.825 vmx_dr_access(exit_qualification, ®s); 1.826 __get_instruction_length(inst_len); 1.827 __update_guest_eip(inst_len); 1.828 @@ -1801,13 +1801,13 @@ asmlinkage void vmx_vmexit_handler(struc 1.829 1.830 asmlinkage void load_cr2(void) 1.831 { 1.832 - struct vcpu *d = current; 1.833 + struct vcpu *v = current; 1.834 1.835 - local_irq_disable(); 1.836 + local_irq_disable(); 1.837 #ifdef __i386__ 1.838 - asm volatile("movl %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2)); 1.839 + asm volatile("movl %0,%%cr2": :"r" (v->arch.arch_vmx.cpu_cr2)); 1.840 #else 1.841 - asm volatile("movq %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2)); 1.842 + asm volatile("movq %0,%%cr2": :"r" (v->arch.arch_vmx.cpu_cr2)); 1.843 #endif 1.844 } 1.845 1.846 @@ -1829,7 +1829,7 @@ asmlinkage void trace_vmexit (void) 1.847 TRACE_3D(TRC_VMEXIT,0,0,0); 1.848 return; 1.849 } 1.850 -#endif 1.851 +#endif 1.852 #endif /* CONFIG_VMX */ 1.853 1.854 /*