The XSA-60 fixes introduced a window during which the guest PAT gets
forced to all zeros. This shouldn't be visible to the guest. Therefore
we need to intercept PAT MSR accesses during that time period.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Liu Jinsong <jinsong.liu@intel.com>
master commit: fce79f8ce91dc45f3a4d699ee67c49e6cbeb1197
master date: 2014-04-01 16:49:18 +0200
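
As an aside (not part of the change itself), here is a minimal standalone C model of the situation described above; every name in it is made up for illustration. It only shows the invariant the patch restores: while the PAT actually loaded for the guest is forced to 0 (every entry UC), an intercepted read of IA32_CR_PAT must keep returning the value the guest last wrote.

/* Standalone model, not Xen code: all names below are hypothetical. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_vcpu {
    uint64_t guest_pat;    /* PAT value the guest believes it has written */
    uint64_t loaded_pat;   /* PAT value actually in effect for the guest */
    bool pat_intercepted;  /* PAT MSR accesses trap to the hypervisor */
};

/* Entering CR0.CD=1 handling: force an all-UC PAT, hide it via intercepts. */
static void enter_uc_window(struct model_vcpu *v)
{
    v->loaded_pat = 0;
    v->pat_intercepted = true;
}

/* Leaving the window: restore the saved guest PAT, allow passthrough again. */
static void leave_uc_window(struct model_vcpu *v)
{
    v->loaded_pat = v->guest_pat;
    v->pat_intercepted = false;
}

/* What a guest RDMSR of IA32_CR_PAT observes. */
static uint64_t guest_rdmsr_pat(const struct model_vcpu *v)
{
    return v->pat_intercepted ? v->guest_pat : v->loaded_pat;
}

int main(void)
{
    /* 0x0007040600070406 is the architectural power-on default PAT. */
    struct model_vcpu v = { .guest_pat = 0x0007040600070406ULL };

    v.loaded_pat = v.guest_pat;
    enter_uc_window(&v);
    assert(guest_rdmsr_pat(&v) == v.guest_pat);  /* forced value stays hidden */
    leave_uc_window(&v);
    assert(guest_rdmsr_pat(&v) == v.guest_pat);
    printf("guest never observes the all-zero PAT\n");
    return 0;
}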
}
}
+void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr)
+{
+    unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
+
+    /* VMX MSR bitmap supported? */
+    if ( msr_bitmap == NULL )
+        return;
+
+    /*
+     * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+     * have the write-low and read-high bitmap offsets the wrong way round.
+     * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+     */
+    if ( msr <= 0x1fff )
+    {
+        set_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */
+        set_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */
+    }
+    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
+    {
+        msr &= 0x1fff;
+        set_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */
+        set_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */
+    }
+}
+
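
For reference (not part of the change), a small standalone C illustration of the bitmap layout the comment above refers to: the 4 KiB MSR bitmap holds four 1 KiB sub-bitmaps at offsets 0x000 (read-low), 0x400 (read-high), 0x800 (write-low) and 0xc00 (write-high), one bit per MSR of the covered range. The helper name below is hypothetical; it only mirrors the offset arithmetic used by vmx_enable_intercept_for_msr().

/* Standalone illustration, not Xen code. */
#include <stdint.h>
#include <stdio.h>

static int msr_bitmap_pos(uint32_t msr, int write,
                          unsigned int *byte, unsigned int *bit)
{
    unsigned int base;

    if ( msr <= 0x1fff )
        base = write ? 0x800 : 0x000;            /* write-low / read-low */
    else if ( msr >= 0xc0000000 && msr <= 0xc0001fff )
        base = write ? 0xc00 : 0x400;            /* write-high / read-high */
    else
        return -1;                               /* not controllable */

    msr &= 0x1fff;
    *byte = base + msr / 8;
    *bit = msr % 8;
    return 0;
}

int main(void)
{
    unsigned int byte, bit;

    if ( msr_bitmap_pos(0x277 /* MSR_IA32_CR_PAT */, 0, &byte, &bit) == 0 )
        printf("PAT read intercept bit:  byte 0x%03x, bit %u\n", byte, bit);
    if ( msr_bitmap_pos(0x277 /* MSR_IA32_CR_PAT */, 1, &byte, &bit) == 0 )
        printf("PAT write intercept bit: byte 0x%03x, bit %u\n", byte, bit);
    return 0;
}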
/*
 * Switch VMCS between layer 1 & 2 guest
 */
            vmx_get_guest_pat(v, pat);
            vmx_set_guest_pat(v, uc_pat);
+            vmx_enable_intercept_for_msr(v, MSR_IA32_CR_PAT);
            wbinvd();               /* flush possibly polluted cache */
            hvm_asid_flush_vcpu(v); /* invalidate memory type cached in TLB */
        {
            v->arch.hvm_vcpu.cache_mode = NORMAL_CACHE_MODE;
            vmx_set_guest_pat(v, *pat);
+            if ( !iommu_enabled || iommu_snoop )
+                vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT);
            hvm_asid_flush_vcpu(v); /* no need to flush cache */
        }
    }
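
Not part of the change, but worth spelling out: the two hunks above are deliberately asymmetric. Entering the CR0.CD=1 path saves the guest PAT, loads the all-UC value and enables interception unconditionally; leaving it restores the guest PAT but only re-disables interception when there is either no IOMMU in use or the IOMMU snoops (iommu_snoop), so interception stays in place in configurations where the hypervisor needs to retain control of effective memory types. A tiny standalone sketch of that guard, with a hypothetical helper name:

/* Standalone sketch, not Xen code: pat_passthrough_ok() is a made-up name. */
#include <stdbool.h>
#include <stdio.h>

static bool pat_passthrough_ok(bool iommu_enabled, bool iommu_snoop)
{
    /* Keep intercepting PAT whenever a non-snooping IOMMU is in use. */
    return !iommu_enabled || iommu_snoop;
}

int main(void)
{
    /* Decision table for re-disabling the PAT MSR intercepts. */
    for ( int e = 0; e <= 1; e++ )
        for ( int s = 0; s <= 1; s++ )
            printf("iommu_enabled=%d iommu_snoop=%d -> %s intercepts\n",
                   e, s, pat_passthrough_ok(e, s) ? "disable" : "keep");
    return 0;
}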
#define VMCS_VPID_WIDTH 16
void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr);
+void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr);
int vmx_read_guest_msr(u32 msr, u64 *val);
int vmx_write_guest_msr(u32 msr, u64 val);
int vmx_add_guest_msr(u32 msr);