--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ ... @@
 #include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/intr.h>
 #include <asm/hvm/nestedhvm.h> /* for nestedhvm_vcpu_in_guestmode */
+#include <asm/vm_event.h>
 #include <xen/event.h>
 #include <xen/kernel.h>
 #include <public/hvm/ioreq.h>
@@ ... @@
     struct hvm_intack intack;
     enum hvm_intblk intblk;
 
+    /* Block event injection while handling a sync vm_event. */
+    if ( unlikely(v->arch.vm_event) && v->arch.vm_event->sync_event )
+        return;
+
     /* Crank the handle on interrupt state. */
     pt_update_irq(v);
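
The new check runs before pt_update_irq() and before any injection work, so
while a synchronous vm_event is outstanding svm_intr_assist() simply returns;
pending interrupts stay queued in the virtual interrupt controllers and are
injected on a later VM entry, once the flag has been cleared. The VMX path
further below gets the identical guard.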
--- a/xen/arch/x86/hvm/vm_event.c
+++ b/xen/arch/x86/hvm/vm_event.c
@@ ... @@
         w->do_write.msr = 0;
     }
+
+    vm_event_sync_event(v, false);
 }
 
 /*
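
The flag is cleared on the resume side, after the pending monitor_write_data
updates above (the w->do_write.* write-backs) have been committed, so event
injection stays blocked until the introspection agent's reply has been fully
applied to the vCPU.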
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ ... @@
 #include <asm/hvm/nestedhvm.h>
 #include <public/hvm/ioreq.h>
 #include <asm/hvm/trace.h>
+#include <asm/vm_event.h>
 
 /*
  * A few notes on virtual NMI and INTR delivery, and interactions with
@@ ... @@
         return;
     }
 
+    /* Block event injection while handling a sync vm_event. */
+    if ( unlikely(v->arch.vm_event) && v->arch.vm_event->sync_event )
+        return;
+
     /* Crank the handle on interrupt state. */
     pt_vector = pt_update_irq(v);
     v->arch.monitor.next_interrupt_enabled = true;
 }
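
For illustration, here is a minimal, self-contained user-space model of the
gate that both interrupt-assist paths now implement. All names in it
(vm_event_state, injection_blocked, intr_assist, and so on) are hypothetical
stand-ins, not Xen code:

    #include <stdbool.h>
    #include <stdio.h>

    struct vm_event_state {
        bool sync_event;                   /* mirrors arch.vm_event->sync_event */
    };

    struct vcpu {
        struct vm_event_state *vm_event;   /* NULL when monitoring is off */
    };

    /* The predicate both svm_intr_assist() and vmx_intr_assist() now test. */
    static bool injection_blocked(const struct vcpu *v)
    {
        return v->vm_event && v->vm_event->sync_event;
    }

    static void intr_assist(struct vcpu *v)
    {
        if ( injection_blocked(v) )
            return;                        /* leave the interrupt pending */
        printf("interrupt injected\n");
    }

    int main(void)
    {
        struct vm_event_state ev = { .sync_event = true };
        struct vcpu v = { .vm_event = &ev };

        intr_assist(&v);                   /* suppressed: sync event in flight */
        ev.sync_event = false;             /* done on the vm_event resume path */
        intr_assist(&v);                   /* prints "interrupt injected" */
        return 0;
    }

The important property is that the check happens before any interrupt state is
consumed, so "blocked" here means deferred, not dropped.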
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ ... @@
+void vm_event_sync_event(struct vcpu *v, bool value)
+{
+    v->arch.vm_event->sync_event = value;
+}
+
 #ifdef CONFIG_HVM
 static void vm_event_pack_segment_register(enum x86_segment segment,
                                            struct vm_event_regs_x86 *reg)
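
vm_event_sync_event() is a per-arch hook rather than an inline flag write in
the common code: x86 stores the value in arch.vm_event->sync_event as above,
while ARM supplies the empty inline stub shown further below, so the common
monitor code can call it unconditionally.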
--- a/xen/common/monitor.c
+++ b/xen/common/monitor.c
@@ ... @@
     if ( sync )
     {
         req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
+        vm_event_sync_event(v, true);
         vm_event_vcpu_pause(v);
         rc = 1;
     }
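
Only the synchronous path raises the flag; asynchronous events leave interrupt
injection untouched. Raising it before vm_event_vcpu_pause() presumably avoids
a window in which the vCPU could still enter the guest and take an interrupt
after the request has been posted but before the pause takes effect.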
--- a/xen/include/asm-arm/vm_event.h
+++ b/xen/include/asm-arm/vm_event.h
@@ ... @@
     /* Not supported on ARM. */
 }
 
+static inline
+void vm_event_sync_event(struct vcpu *v, bool value)
+{
+    /* Not supported on ARM. */
+}
+
 #endif /* __ASM_ARM_VM_EVENT_H__ */
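
As with the neighbouring vm_event helpers, the ARM stub compiles to nothing:
blocking injection during sync events is implemented for x86 only, and the
stub merely keeps the common call site building on ARM.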
--- a/xen/include/asm-x86/vm_event.h
+++ b/xen/include/asm-x86/vm_event.h
@@ ... @@
     struct monitor_write_data write_data;
     struct vm_event_regs_x86 gprs;
     bool set_gprs;
+    /* A sync vm_event has been sent and we're not done handling it. */
+    bool sync_event;
 };
 
 int vm_event_init_domain(struct domain *d);
@@ ... @@
 void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp);
 
+void vm_event_sync_event(struct vcpu *v, bool value);
+
 #endif /* __ASM_X86_VM_EVENT_H__ */
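
Putting the pieces together, this is the lifecycle the patch establishes,
again as a self-contained sketch with hypothetical names rather than the real
Xen types:

    #include <assert.h>
    #include <stdbool.h>

    struct vm_event_state { bool sync_event; };
    struct vcpu { struct vm_event_state *vm_event; };

    /* monitor_traps(), sync case: flag up, then the vCPU is paused. */
    static void send_sync_event(struct vcpu *v)
    {
        v->vm_event->sync_event = true;    /* vm_event_sync_event(v, true) */
        /* vm_event_vcpu_pause(v) follows in the real code */
    }

    /* svm/vmx intr_assist(): injection is gated on the flag. */
    static bool may_inject(const struct vcpu *v)
    {
        return !(v->vm_event && v->vm_event->sync_event);
    }

    /* Resume path: the reply has been applied, injection may restart. */
    static void resume_from_event(struct vcpu *v)
    {
        v->vm_event->sync_event = false;   /* vm_event_sync_event(v, false) */
    }

    int main(void)
    {
        struct vm_event_state ev = { false };
        struct vcpu v = { &ev };

        assert(may_inject(&v));            /* nothing in flight */
        send_sync_event(&v);
        assert(!may_inject(&v));           /* blocked while the event is handled */
        resume_from_event(&v);
        assert(may_inject(&v));            /* injection resumes after the reply */
        return 0;
    }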