VMX: fix PAT value seen by guest
author    Jan Beulich <jbeulich@suse.com>
          Wed, 9 Apr 2014 09:52:21 +0000 (11:52 +0200)
committer Jan Beulich <jbeulich@suse.com>
          Wed, 9 Apr 2014 09:52:21 +0000 (11:52 +0200)
The XSA-60 fixes introduced a window during which the guest PAT is
forced to all zeros. That value must not become visible to the guest,
so PAT MSR accesses need to be intercepted for the duration of that
window.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Liu Jinsong <jinsong.liu@intel.com>
master commit: fce79f8ce91dc45f3a4d699ee67c49e6cbeb1197
master date: 2014-04-01 16:49:18 +0200
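
For context: an all-zeros PAT encodes memory type 0 (UC) in every one of
its eight entries, which is exactly the strong-uncacheable behaviour
wanted while the guest runs with CD=1, but not a value the guest ever
wrote. A small standalone sketch (illustrative only, not part of this
commit) decodes the packed PAT format and contrasts the forced value
with the architectural reset value:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode one of the eight memory types packed one per byte in IA32_PAT. */
    static const char *pat_type(uint8_t t)
    {
        switch ( t & 7 )
        {
        case 0: return "UC";
        case 1: return "WC";
        case 4: return "WT";
        case 5: return "WP";
        case 6: return "WB";
        case 7: return "UC-";
        default: return "rsvd";
        }
    }

    int main(void)
    {
        uint64_t forced = 0;                     /* value loaded during the window */
        uint64_t reset  = 0x0007040600070406ULL; /* architectural reset value */

        for ( unsigned int i = 0; i < 8; i++ )
            printf("PA%u: forced=%-4s reset=%s\n", i,
                   pat_type(forced >> (8 * i)),
                   pat_type(reset >> (8 * i)));
        return 0;
    }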

xen/arch/x86/hvm/vmx/vmcs.c
xen/arch/x86/hvm/vmx/vmx.c
xen/include/asm-x86/hvm/vmx/vmcs.h

index 6042dd16a529a40b888590b6a9d20bf0335bbd38..b09e8d79652262ca06ca8e236dcccea7d81e79df 100644 (file)
@@ -682,6 +682,32 @@ void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr)
     }
 }
 
+void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr)
+{
+    unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
+
+    /* VMX MSR bitmap supported? */
+    if ( msr_bitmap == NULL )
+        return;
+
+    /*
+     * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+     * have the write-low and read-high bitmap offsets the wrong way round.
+     * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+     */
+    if ( msr <= 0x1fff )
+    {
+        set_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */
+        set_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */
+    }
+    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
+    {
+        msr &= 0x1fff;
+        set_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */
+        set_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */
+    }
+}
+
 /*
  * Switch VMCS between layer 1 & 2 guest
  */
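
The bitmap layout the new function relies on can be made concrete with a
standalone sketch (illustrative only, not Xen code): each 1 KiB quarter
of the 4 KiB bitmap page covers reads or writes for one of the two
architecturally defined MSR ranges, and MSR_IA32_CR_PAT (0x277) falls in
the low range:

    #include <stdint.h>
    #include <stdio.h>

    /* Print which byte/bit of the MSR bitmap page governs a given MSR. */
    static void show_intercept_bits(uint32_t msr)
    {
        if ( msr <= 0x1fff )
            printf("MSR %#x: bit %u, read byte %#x, write byte %#x\n",
                   msr, msr % 8, 0x000 + msr / 8, 0x800 + msr / 8);
        else if ( msr >= 0xc0000000 && msr <= 0xc0001fff )
        {
            uint32_t off = msr & 0x1fff;

            printf("MSR %#x: bit %u, read byte %#x, write byte %#x\n",
                   msr, off % 8, 0x400 + off / 8, 0xc00 + off / 8);
        }
        else
            printf("MSR %#x: not controllable via the bitmap\n", msr);
    }

    int main(void)
    {
        show_intercept_bits(0x277);      /* MSR_IA32_CR_PAT */
        show_intercept_bits(0xc0000080); /* MSR_EFER, for contrast */
        return 0;
    }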
index 1bf0297c30e98c31bd01fa8ac3d91ac7c42cb284..ec85b8d1357c9341cdf6f3de6786ae0db5cc9ab9 100644 (file)
@@ -1033,6 +1033,7 @@ static void vmx_handle_cd(struct vcpu *v, unsigned long value)
 
             vmx_get_guest_pat(v, pat);
             vmx_set_guest_pat(v, uc_pat);
+            vmx_enable_intercept_for_msr(v, MSR_IA32_CR_PAT);
 
             wbinvd();               /* flush possibly polluted cache */
             hvm_asid_flush_vcpu(v); /* invalidate memory type cached in TLB */
@@ -1042,6 +1043,8 @@ static void vmx_handle_cd(struct vcpu *v, unsigned long value)
         {
             v->arch.hvm_vcpu.cache_mode = NORMAL_CACHE_MODE;
             vmx_set_guest_pat(v, *pat);
+            if ( !iommu_enabled || iommu_snoop )
+                vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT);
             hvm_asid_flush_vcpu(v); /* no need to flush cache */
         }
     }
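
The effect of the pairing above: while the guest runs in no-fill mode,
PAT reads exit to Xen and are answered from the value saved by
vmx_handle_cd(), so the temporary all-zeros VMCS value stays invisible;
once caching is re-enabled (and there is either no IOMMU or a snooping
one, making pass-through safe again), the intercept is dropped. A
hypothetical standalone sketch of the read side, using made-up stand-in
types rather than Xen's real struct vcpu:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the relevant per-vCPU state; not a Xen type. */
    struct fake_vcpu {
        uint64_t saved_pat;   /* guest value stashed by vmx_handle_cd() */
        uint64_t vmcs_pat;    /* forced all-zeros while CD=1 */
    };

    /* With the intercept bit set, a guest RDMSR of the PAT MSR lands here. */
    static uint64_t pat_rdmsr_sketch(const struct fake_vcpu *v)
    {
        return v->saved_pat; /* report the saved value, not the forced one */
    }

    int main(void)
    {
        struct fake_vcpu v = { .saved_pat = 0x0007040600070406ULL, .vmcs_pat = 0 };

        printf("guest sees PAT %#llx rather than %#llx\n",
               (unsigned long long)pat_rdmsr_sketch(&v),
               (unsigned long long)v.vmcs_pat);
        return 0;
    }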
index 7c51b497031c93a57cc18434f9d068e95d973326..ad11057a38e10051cdcb10389b17abdea3be5729 100644 (file)
@@ -391,6 +391,7 @@ enum vmcs_field {
 #define VMCS_VPID_WIDTH 16
 
 void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr);
+void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr);
 int vmx_read_guest_msr(u32 msr, u64 *val);
 int vmx_write_guest_msr(u32 msr, u64 val);
 int vmx_add_guest_msr(u32 msr);