nested vmx: synchronize page fault error code match and mask
author    Dongxiao Xu <dongxiao.xu@intel.com>
          Tue, 8 Jan 2013 09:43:35 +0000 (10:43 +0100)
committer Dongxiao Xu <dongxiao.xu@intel.com>
          Tue, 8 Jan 2013 09:43:35 +0000 (10:43 +0100)
Page faults are handled specially, not only via the exception bitmap
but also by taking the page fault error code mask/match values into
account. Therefore, in the nested virtualization case, these two values
need to be synchronized from the virtual VMCS to the shadow VMCS.
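
For context (not part of the patch): the hardware decides whether a guest
#PF triggers a VM exit by combining bit 14 of the exception bitmap with the
PAGE_FAULT_ERROR_CODE_MASK/MATCH fields, roughly as in the sketch below.
The standalone function and its name are illustrative only, not Xen code.

/* Illustrative sketch of the #PF VM-exit decision the commit message
 * refers to; pf_causes_vmexit() is a made-up name, not a Xen function. */
#include <stdbool.h>
#include <stdint.h>

static bool pf_causes_vmexit(uint32_t exception_bitmap,
                             uint32_t pfec_mask, uint32_t pfec_match,
                             uint32_t error_code)
{
    /* Bit 14 of the exception bitmap corresponds to #PF (vector 14). */
    bool bit14_set = exception_bitmap & (1u << 14);

    /* The error code is ANDed with the mask and compared with the match
     * value; on a match the exception-bitmap bit applies directly,
     * otherwise its sense is inverted. */
    if ((error_code & pfec_mask) == pfec_match)
        return bit14_set;
    else
        return !bit14_set;
}

Because the L1 hypervisor programs its own mask/match values into the
virtual VMCS, the patch below copies them into the shadow VMCS so the
decision above behaves as L1 expects.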

Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
Committed-by: Jan Beulich <jbeulich@suse.com>
xen/arch/x86/hvm/vmx/vvmx.c

index 16fb3fd1ae846e6057bfd927e8251983e4536946..0f13884b2aaac0e05868e6e5bbd3f039ac50ad39 100644
@@ -603,6 +603,17 @@ static void nvmx_update_tpr_threshold(struct vcpu *v)
         __vmwrite(TPR_THRESHOLD, 0);
 }
 
+static void nvmx_update_pfec(struct vcpu *v)
+{
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    void *vvmcs = nvcpu->nv_vvmcx;
+
+    __vmwrite(PAGE_FAULT_ERROR_CODE_MASK,
+        __get_vvmcs(vvmcs, PAGE_FAULT_ERROR_CODE_MASK));
+    __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH,
+        __get_vvmcs(vvmcs, PAGE_FAULT_ERROR_CODE_MATCH));
+}
+
 static void __clear_current_vvmcs(struct vcpu *v)
 {
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
@@ -813,6 +824,7 @@ static void load_shadow_control(struct vcpu *v)
     nvmx_update_apic_access_address(v);
     nvmx_update_virtual_apic_address(v);
     nvmx_update_tpr_threshold(v);
+    nvmx_update_pfec(v);
 }
 
 static void load_shadow_guest_state(struct vcpu *v)