        __clear_bit(msr * 2 + 1, msr_bit);
}

+static void svm_enable_msr_interception(struct domain *d, uint32_t msr)
+{
+    struct vcpu *v;
+
+    for_each_vcpu ( d, v )
+        svm_intercept_msr(v, msr, MSR_INTERCEPT_WRITE);
+}
+
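The new callback is the SVM counterpart of what VMX already provides: common monitor code is not expected to call svm_enable_msr_interception() directly, but to dispatch through the hvm_funcs table entry wired up below. A minimal sketch of such a dispatch wrapper, assuming it sits in hvm.h and treats the hook as optional (the name and return convention are illustrative, not quoted from this patch):

/* Sketch only: dispatch to the per-vendor hook if one is installed. */
static inline bool_t hvm_enable_msr_interception(struct domain *d, uint32_t msr)
{
    if ( hvm_funcs.enable_msr_interception )
    {
        hvm_funcs.enable_msr_interception(d, msr);
        return 1;
    }

    return 0;
}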
static void svm_save_dr(struct vcpu *v)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    .fpu_dirty_intercept = svm_fpu_dirty_intercept,
    .msr_read_intercept = svm_msr_read_intercept,
    .msr_write_intercept = svm_msr_write_intercept,
+    .enable_msr_interception = svm_enable_msr_interception,
    .set_rdtsc_exiting = svm_set_rdtsc_exiting,
    .set_descriptor_access_exiting = svm_set_descriptor_access_exiting,
    .get_insn_bytes = svm_get_insn_bytes,
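The remaining hunk (below) moves XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR out of the cpu_has_vmx-only block, so the capability is now advertised for all HVM domains, AMD included. A hypothetical helper showing how a monitor request could be gated on that capability; arch_monitor_get_capabilities() and the event bit come from the monitor code, while the helper itself is made up for illustration:

/* Illustrative only, not part of the patch. */
static int msr_monitor_supported(struct domain *d)
{
    uint32_t caps = arch_monitor_get_capabilities(d);

    /* After this change the bit is set for SVM as well as VMX domains. */
    if ( !(caps & (1U << XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR)) )
        return -EOPNOTSUPP;

    return 0;
}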
        return capabilities;

    capabilities = ((1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST) |
-                    (1U << XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT));
+                    (1U << XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT) |
+                    (1U << XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR));
    if ( cpu_has_vmx )
    {
        capabilities |= ((1U << XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG) |
-                         (1U << XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR) |
                         (1U << XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION) |
                         (1U << XEN_DOMCTL_MONITOR_EVENT_CPUID) |
                         (1U << XEN_DOMCTL_MONITOR_EVENT_INTERRUPT) |