mem_access: support for memory-content hiding
author    Razvan Cojocaru <rcojocaru@bitdefender.com>
Wed, 15 Jul 2015 14:08:03 +0000 (16:08 +0200)
committer Jan Beulich <jbeulich@suse.com>
Wed, 15 Jul 2015 14:08:03 +0000 (16:08 +0200)
This patch adds support for memory-content hiding, by modifying the
value returned by emulated instructions that read certain memory
addresses that contain sensitive data. The patch only applies to
cases where VM_EVENT_FLAG_EMULATE has been set in a vm_event
response.
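
As a rough illustration (not part of this patch, and assuming the usual
xen-access-style event loop on the toolstack side), a vm_event consumer
could hide the real memory contents from an emulated read by responding
along these lines:

    vm_event_response_t rsp;

    memset(&rsp, 0, sizeof(rsp));
    rsp.version = VM_EVENT_INTERFACE_VERSION;
    rsp.vcpu_id = req.vcpu_id;
    rsp.flags   = VM_EVENT_FLAG_VCPU_PAUSED |        /* unpause the vCPU */
                  VM_EVENT_FLAG_EMULATE |            /* emulate the faulting instruction */
                  VM_EVENT_FLAG_SET_EMUL_READ_DATA;  /* ... using the data below */

    /* Feed the emulator 8 bytes of zeros instead of the real memory contents. */
    rsp.data.emul_read_data.size = 8;
    memset(rsp.data.emul_read_data.data, 0, 8);

    /* Put rsp on the vm_event ring and notify the event channel as usual. */

The rest of the response (e.g. rsp.u.mem_access) is filled in exactly as
for any other mem_access reply.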

Signed-off-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
Acked-by: Tamas K Lengyel <tlengyel@novetta.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
Release-acked-by: Wei Liu <wei.liu2@citrix.com>
12 files changed:
tools/tests/xen-access/xen-access.c
xen/arch/x86/domain.c
xen/arch/x86/hvm/emulate.c
xen/arch/x86/hvm/event.c
xen/arch/x86/mm/p2m.c
xen/arch/x86/vm_event.c
xen/common/vm_event.c
xen/include/asm-arm/vm_event.h
xen/include/asm-x86/domain.h
xen/include/asm-x86/hvm/emulate.h
xen/include/asm-x86/vm_event.h
xen/include/public/vm_event.h

diff --git a/tools/tests/xen-access/xen-access.c b/tools/tests/xen-access/xen-access.c
index 12ab9213508fe6174179583b12e5b0b45b6a8711..e6ca9bafde7570d52fd53ac5557d42b2015d0d69 100644
@@ -530,7 +530,7 @@ int main(int argc, char *argv[])
                 break;
             case VM_EVENT_REASON_SOFTWARE_BREAKPOINT:
                 printf("Breakpoint: rip=%016"PRIx64", gfn=%"PRIx64" (vcpu %d)\n",
-                       req.regs.x86.rip,
+                       req.data.regs.x86.rip,
                        req.u.software_breakpoint.gfn,
                        req.vcpu_id);
 
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 34ecd7cfe46ec5f36ec5e983c8b411c50db897a9..1ef9fadd5689211e40277149ae1bd45359b951a1 100644
@@ -511,6 +511,9 @@ int vcpu_initialise(struct vcpu *v)
 
 void vcpu_destroy(struct vcpu *v)
 {
+    xfree(v->arch.vm_event.emul_read_data);
+    v->arch.vm_event.emul_read_data = NULL;
+
     if ( is_pv_32bit_vcpu(v) )
     {
         free_compat_arg_xlat(v);
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 795321c2b0b789d90c5b18b3b0474b2ffbc15b8e..e21f5e104c0a0256bf855734ace5ef65964f1e5b 100644
@@ -67,6 +67,23 @@ static int null_write(const struct hvm_io_handler *handler,
     return X86EMUL_OKAY;
 }
 
+static int set_context_data(void *buffer, unsigned int size)
+{
+    struct vcpu *curr = current;
+
+    if ( curr->arch.vm_event.emul_read_data )
+    {
+        unsigned int safe_size =
+            min(size, curr->arch.vm_event.emul_read_data->size);
+
+        memcpy(buffer, curr->arch.vm_event.emul_read_data->data, safe_size);
+        memset(buffer + safe_size, 0, size - safe_size);
+        return X86EMUL_OKAY;
+    }
+
+    return X86EMUL_UNHANDLEABLE;
+}
+
 static const struct hvm_io_ops null_ops = {
     .read = null_read,
     .write = null_write
@@ -771,6 +788,12 @@ static int hvmemul_read(
     unsigned int bytes,
     struct x86_emulate_ctxt *ctxt)
 {
+    struct hvm_emulate_ctxt *hvmemul_ctxt =
+        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+
+    if ( unlikely(hvmemul_ctxt->set_context) )
+        return set_context_data(p_data, bytes);
+
     return __hvmemul_read(
         seg, offset, p_data, bytes, hvm_access_read,
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt));
@@ -963,6 +986,17 @@ static int hvmemul_cmpxchg(
     unsigned int bytes,
     struct x86_emulate_ctxt *ctxt)
 {
+    struct hvm_emulate_ctxt *hvmemul_ctxt =
+        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+
+    if ( unlikely(hvmemul_ctxt->set_context) )
+    {
+        int rc = set_context_data(p_new, bytes);
+
+        if ( rc != X86EMUL_OKAY )
+            return rc;
+    }
+
     /* Fix this in case the guest is really relying on r-m-w atomicity. */
     return hvmemul_write(seg, offset, p_new, bytes, ctxt);
 }
@@ -1005,6 +1039,33 @@ static int hvmemul_rep_ins(
                                !!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa);
 }
 
+static int hvmemul_rep_outs_set_context(
+    enum x86_segment src_seg,
+    unsigned long src_offset,
+    uint16_t dst_port,
+    unsigned int bytes_per_rep,
+    unsigned long *reps,
+    struct x86_emulate_ctxt *ctxt)
+{
+    unsigned int bytes = *reps * bytes_per_rep;
+    char *buf;
+    int rc;
+
+    buf = xmalloc_array(char, bytes);
+
+    if ( buf == NULL )
+        return X86EMUL_UNHANDLEABLE;
+
+    rc = set_context_data(buf, bytes);
+
+    if ( rc == X86EMUL_OKAY )
+        rc = hvmemul_do_pio_buffer(dst_port, bytes, IOREQ_WRITE, buf);
+
+    xfree(buf);
+
+    return rc;
+}
+
 static int hvmemul_rep_outs(
     enum x86_segment src_seg,
     unsigned long src_offset,
@@ -1021,6 +1082,10 @@ static int hvmemul_rep_outs(
     p2m_type_t p2mt;
     int rc;
 
+    if ( unlikely(hvmemul_ctxt->set_context) )
+        return hvmemul_rep_outs_set_context(src_seg, src_offset, dst_port,
+                                            bytes_per_rep, reps, ctxt);
+
     rc = hvmemul_virtual_to_linear(
         src_seg, src_offset, bytes_per_rep, reps, hvm_access_read,
         hvmemul_ctxt, &addr);
@@ -1127,11 +1192,26 @@ static int hvmemul_rep_movs(
     if ( buf == NULL )
         return X86EMUL_UNHANDLEABLE;
 
-    /*
-     * We do a modicum of checking here, just for paranoia's sake and to
-     * definitely avoid copying an unitialised buffer into guest address space.
-     */
-    rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);
+    if ( unlikely(hvmemul_ctxt->set_context) )
+    {
+        rc = set_context_data(buf, bytes);
+
+        if ( rc != X86EMUL_OKAY )
+        {
+            xfree(buf);
+            return rc;
+        }
+
+        rc = HVMCOPY_okay;
+    }
+    else
+        /*
+         * We do a modicum of checking here, just for paranoia's sake and to
+         * definitely avoid copying an unitialised buffer into guest address
+         * space.
+         */
+        rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);
+
     if ( rc == HVMCOPY_okay )
         rc = hvm_copy_to_guest_phys(dgpa, buf, bytes);
 
@@ -1292,7 +1372,14 @@ static int hvmemul_read_io(
     unsigned long *val,
     struct x86_emulate_ctxt *ctxt)
 {
+    struct hvm_emulate_ctxt *hvmemul_ctxt =
+        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+
     *val = 0;
+
+    if ( unlikely(hvmemul_ctxt->set_context) )
+        return set_context_data(val, bytes);
+
     return hvmemul_do_pio_buffer(port, bytes, IOREQ_READ, val);
 }
 
@@ -1677,7 +1764,7 @@ int hvm_emulate_one_no_write(
     return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops_no_write);
 }
 
-void hvm_mem_access_emulate_one(bool_t nowrite, unsigned int trapnr,
+void hvm_mem_access_emulate_one(enum emul_kind kind, unsigned int trapnr,
     unsigned int errcode)
 {
     struct hvm_emulate_ctxt ctx = {{ 0 }};
@@ -1685,10 +1772,17 @@ void hvm_mem_access_emulate_one(bool_t nowrite, unsigned int trapnr,
 
     hvm_emulate_prepare(&ctx, guest_cpu_user_regs());
 
-    if ( nowrite )
+    switch ( kind )
+    {
+    case EMUL_KIND_NOWRITE:
         rc = hvm_emulate_one_no_write(&ctx);
-    else
+        break;
+    case EMUL_KIND_SET_CONTEXT:
+        ctx.set_context = 1;
+        /* Intentional fall-through. */
+    default:
         rc = hvm_emulate_one(&ctx);
+    }
 
     switch ( rc )
     {
@@ -1722,6 +1816,7 @@ void hvm_emulate_prepare(
     hvmemul_ctxt->ctxt.force_writeback = 1;
     hvmemul_ctxt->seg_reg_accessed = 0;
     hvmemul_ctxt->seg_reg_dirty = 0;
+    hvmemul_ctxt->set_context = 0;
     hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
     hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
 }
diff --git a/xen/arch/x86/hvm/event.c b/xen/arch/x86/hvm/event.c
index 53b9ca401d02873711623da198bcffce0b537404..53419371bbf9b0e4e3ead850ef8df9578c896f93 100644
@@ -30,31 +30,31 @@ static void hvm_event_fill_regs(vm_event_request_t *req)
     const struct cpu_user_regs *regs = guest_cpu_user_regs();
     const struct vcpu *curr = current;
 
-    req->regs.x86.rax = regs->eax;
-    req->regs.x86.rcx = regs->ecx;
-    req->regs.x86.rdx = regs->edx;
-    req->regs.x86.rbx = regs->ebx;
-    req->regs.x86.rsp = regs->esp;
-    req->regs.x86.rbp = regs->ebp;
-    req->regs.x86.rsi = regs->esi;
-    req->regs.x86.rdi = regs->edi;
-
-    req->regs.x86.r8  = regs->r8;
-    req->regs.x86.r9  = regs->r9;
-    req->regs.x86.r10 = regs->r10;
-    req->regs.x86.r11 = regs->r11;
-    req->regs.x86.r12 = regs->r12;
-    req->regs.x86.r13 = regs->r13;
-    req->regs.x86.r14 = regs->r14;
-    req->regs.x86.r15 = regs->r15;
-
-    req->regs.x86.rflags = regs->eflags;
-    req->regs.x86.rip    = regs->eip;
-
-    req->regs.x86.msr_efer = curr->arch.hvm_vcpu.guest_efer;
-    req->regs.x86.cr0 = curr->arch.hvm_vcpu.guest_cr[0];
-    req->regs.x86.cr3 = curr->arch.hvm_vcpu.guest_cr[3];
-    req->regs.x86.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
+    req->data.regs.x86.rax = regs->eax;
+    req->data.regs.x86.rcx = regs->ecx;
+    req->data.regs.x86.rdx = regs->edx;
+    req->data.regs.x86.rbx = regs->ebx;
+    req->data.regs.x86.rsp = regs->esp;
+    req->data.regs.x86.rbp = regs->ebp;
+    req->data.regs.x86.rsi = regs->esi;
+    req->data.regs.x86.rdi = regs->edi;
+
+    req->data.regs.x86.r8  = regs->r8;
+    req->data.regs.x86.r9  = regs->r9;
+    req->data.regs.x86.r10 = regs->r10;
+    req->data.regs.x86.r11 = regs->r11;
+    req->data.regs.x86.r12 = regs->r12;
+    req->data.regs.x86.r13 = regs->r13;
+    req->data.regs.x86.r14 = regs->r14;
+    req->data.regs.x86.r15 = regs->r15;
+
+    req->data.regs.x86.rflags = regs->eflags;
+    req->data.regs.x86.rip    = regs->eip;
+
+    req->data.regs.x86.msr_efer = curr->arch.hvm_vcpu.guest_efer;
+    req->data.regs.x86.cr0 = curr->arch.hvm_vcpu.guest_cr[0];
+    req->data.regs.x86.cr3 = curr->arch.hvm_vcpu.guest_cr[3];
+    req->data.regs.x86.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
 }
 
 static int hvm_event_traps(uint8_t sync, vm_event_request_t *req)
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index f180cee8035d2a33b0249c6cd40c309b4970bc47..6fe63872d8bc7b8d09535b447c6d45657f27a73c 100644
@@ -1369,49 +1369,49 @@ static void p2m_vm_event_fill_regs(vm_event_request_t *req)
     /* Architecture-specific vmcs/vmcb bits */
     hvm_funcs.save_cpu_ctxt(curr, &ctxt);
 
-    req->regs.x86.rax = regs->eax;
-    req->regs.x86.rcx = regs->ecx;
-    req->regs.x86.rdx = regs->edx;
-    req->regs.x86.rbx = regs->ebx;
-    req->regs.x86.rsp = regs->esp;
-    req->regs.x86.rbp = regs->ebp;
-    req->regs.x86.rsi = regs->esi;
-    req->regs.x86.rdi = regs->edi;
-
-    req->regs.x86.r8  = regs->r8;
-    req->regs.x86.r9  = regs->r9;
-    req->regs.x86.r10 = regs->r10;
-    req->regs.x86.r11 = regs->r11;
-    req->regs.x86.r12 = regs->r12;
-    req->regs.x86.r13 = regs->r13;
-    req->regs.x86.r14 = regs->r14;
-    req->regs.x86.r15 = regs->r15;
-
-    req->regs.x86.rflags = regs->eflags;
-    req->regs.x86.rip    = regs->eip;
-
-    req->regs.x86.dr7 = curr->arch.debugreg[7];
-    req->regs.x86.cr0 = ctxt.cr0;
-    req->regs.x86.cr2 = ctxt.cr2;
-    req->regs.x86.cr3 = ctxt.cr3;
-    req->regs.x86.cr4 = ctxt.cr4;
-
-    req->regs.x86.sysenter_cs = ctxt.sysenter_cs;
-    req->regs.x86.sysenter_esp = ctxt.sysenter_esp;
-    req->regs.x86.sysenter_eip = ctxt.sysenter_eip;
-
-    req->regs.x86.msr_efer = ctxt.msr_efer;
-    req->regs.x86.msr_star = ctxt.msr_star;
-    req->regs.x86.msr_lstar = ctxt.msr_lstar;
+    req->data.regs.x86.rax = regs->eax;
+    req->data.regs.x86.rcx = regs->ecx;
+    req->data.regs.x86.rdx = regs->edx;
+    req->data.regs.x86.rbx = regs->ebx;
+    req->data.regs.x86.rsp = regs->esp;
+    req->data.regs.x86.rbp = regs->ebp;
+    req->data.regs.x86.rsi = regs->esi;
+    req->data.regs.x86.rdi = regs->edi;
+
+    req->data.regs.x86.r8  = regs->r8;
+    req->data.regs.x86.r9  = regs->r9;
+    req->data.regs.x86.r10 = regs->r10;
+    req->data.regs.x86.r11 = regs->r11;
+    req->data.regs.x86.r12 = regs->r12;
+    req->data.regs.x86.r13 = regs->r13;
+    req->data.regs.x86.r14 = regs->r14;
+    req->data.regs.x86.r15 = regs->r15;
+
+    req->data.regs.x86.rflags = regs->eflags;
+    req->data.regs.x86.rip    = regs->eip;
+
+    req->data.regs.x86.dr7 = curr->arch.debugreg[7];
+    req->data.regs.x86.cr0 = ctxt.cr0;
+    req->data.regs.x86.cr2 = ctxt.cr2;
+    req->data.regs.x86.cr3 = ctxt.cr3;
+    req->data.regs.x86.cr4 = ctxt.cr4;
+
+    req->data.regs.x86.sysenter_cs = ctxt.sysenter_cs;
+    req->data.regs.x86.sysenter_esp = ctxt.sysenter_esp;
+    req->data.regs.x86.sysenter_eip = ctxt.sysenter_eip;
+
+    req->data.regs.x86.msr_efer = ctxt.msr_efer;
+    req->data.regs.x86.msr_star = ctxt.msr_star;
+    req->data.regs.x86.msr_lstar = ctxt.msr_lstar;
 
     hvm_get_segment_register(curr, x86_seg_fs, &seg);
-    req->regs.x86.fs_base = seg.base;
+    req->data.regs.x86.fs_base = seg.base;
 
     hvm_get_segment_register(curr, x86_seg_gs, &seg);
-    req->regs.x86.gs_base = seg.base;
+    req->data.regs.x86.gs_base = seg.base;
 
     hvm_get_segment_register(curr, x86_seg_cs, &seg);
-    req->regs.x86.cs_arbytes = seg.attr.bytes;
+    req->data.regs.x86.cs_arbytes = seg.attr.bytes;
 }
 
 void p2m_mem_access_emulate_check(struct vcpu *v,
@@ -1466,6 +1466,10 @@ void p2m_mem_access_emulate_check(struct vcpu *v,
         }
 
         v->arch.vm_event.emulate_flags = violation ? rsp->flags : 0;
+
+        if ( (rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA) &&
+             v->arch.vm_event.emul_read_data )
+            *v->arch.vm_event.emul_read_data = rsp->data.emul_read_data;
     }
 }
 
@@ -1552,9 +1556,17 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
 
     if ( v->arch.vm_event.emulate_flags )
     {
-        hvm_mem_access_emulate_one((v->arch.vm_event.emulate_flags &
-                                    VM_EVENT_FLAG_EMULATE_NOWRITE) != 0,
-                                   TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+        enum emul_kind kind = EMUL_KIND_NORMAL;
+
+        if ( v->arch.vm_event.emulate_flags &
+             VM_EVENT_FLAG_SET_EMUL_READ_DATA )
+            kind = EMUL_KIND_SET_CONTEXT;
+        else if ( v->arch.vm_event.emulate_flags &
+                  VM_EVENT_FLAG_EMULATE_NOWRITE )
+            kind = EMUL_KIND_NOWRITE;
+
+        hvm_mem_access_emulate_one(kind, TRAP_invalid_op,
+                                   HVM_DELIVER_NO_ERROR_CODE);
 
         v->arch.vm_event.emulate_flags = 0;
         return 1;
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index c3902259a6dfd422a6b7c8d4f7547966b205b8d9..ec856fbabc74a4d9d3e75cba5001e38c3f0b2f5d 100644
 #include <xen/sched.h>
 #include <asm/hvm/hvm.h>
 
+/* Implicitly serialized by the domctl lock. */
+int vm_event_init_domain(struct domain *d)
+{
+    struct vcpu *v;
+
+    for_each_vcpu ( d, v )
+    {
+        if ( v->arch.vm_event.emul_read_data )
+            continue;
+
+        v->arch.vm_event.emul_read_data =
+            xzalloc(struct vm_event_emul_read_data);
+
+        if ( !v->arch.vm_event.emul_read_data )
+            return -ENOMEM;
+    }
+
+    return 0;
+}
+
+/*
+ * Implicitly serialized by the domctl lock,
+ * or on domain cleanup paths only.
+ */
+void vm_event_cleanup_domain(struct domain *d)
+{
+    struct vcpu *v;
+
+    for_each_vcpu ( d, v )
+    {
+        xfree(v->arch.vm_event.emul_read_data);
+        v->arch.vm_event.emul_read_data = NULL;
+    }
+}
+
 void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
 {
     if ( !is_hvm_domain(d) || !atomic_read(&v->vm_event_pause_count) )
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index a4b9c36cd66012594f65744150e57ffa108acd8a..0007d70a29c4288e163a521f0fafd95755e45d0d 100644
@@ -64,6 +64,11 @@ static int vm_event_enable(
     vm_event_ring_lock_init(ved);
     vm_event_ring_lock(ved);
 
+    rc = vm_event_init_domain(d);
+
+    if ( rc < 0 )
+        goto err;
+
     rc = prepare_ring_for_helper(d, ring_gfn, &ved->ring_pg_struct,
                                     &ved->ring_page);
     if ( rc < 0 )
@@ -226,6 +231,9 @@ static int vm_event_disable(struct domain *d, struct vm_event_domain *ved)
 
         destroy_ring_for_helper(&ved->ring_page,
                                 ved->ring_pg_struct);
+
+        vm_event_cleanup_domain(d);
+
         vm_event_ring_unlock(ved);
     }
 
diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
index a517495826cb60a91c62525d09ce286d90ac9b98..20469a8720a5541184aa8e589941a2d10f95ab45 100644
 
 #include <xen/sched.h>
 
+static inline
+int vm_event_init_domain(struct domain *d)
+{
+    /* Not supported on ARM. */
+    return 0;
+}
+
+static inline
+void vm_event_cleanup_domain(struct domain *d)
+{
+    /* Not supported on ARM. */
+}
+
 static inline
 void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
 {
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 201436d5f84b9fbb3f33835392dc235bed31c4bb..c5ad1cbcf2cec8063d1cab1b6d5f32f45a6f7cbb 100644
@@ -512,6 +512,7 @@ struct arch_vcpu
         uint32_t emulate_flags;
         unsigned long gpa;
         unsigned long eip;
+        struct vm_event_emul_read_data *emul_read_data;
     } vm_event;
 
 };
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index 594be383b717b475884fe3bb51f9ff82ecec7916..49134b52f79dd301880ab15172396c654447ce17 100644
@@ -32,13 +32,21 @@ struct hvm_emulate_ctxt {
     struct hvm_trap trap;
 
     uint32_t intr_shadow;
+
+    bool_t set_context;
+};
+
+enum emul_kind {
+    EMUL_KIND_NORMAL,
+    EMUL_KIND_NOWRITE,
+    EMUL_KIND_SET_CONTEXT
 };
 
 int hvm_emulate_one(
     struct hvm_emulate_ctxt *hvmemul_ctxt);
 int hvm_emulate_one_no_write(
     struct hvm_emulate_ctxt *hvmemul_ctxt);
-void hvm_mem_access_emulate_one(bool_t nowrite,
+void hvm_mem_access_emulate_one(enum emul_kind kind,
     unsigned int trapnr,
     unsigned int errcode);
 void hvm_emulate_prepare(
diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h
index 7cc3a3dff2fae179ee4771157bf7291c85c5a07e..3881783bf66306818a7e9c9aa6374526a32768b6 100644
 
 #include <xen/sched.h>
 
+int vm_event_init_domain(struct domain *d);
+
+void vm_event_cleanup_domain(struct domain *d);
+
 void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v);
 
 #endif /* __ASM_X86_VM_EVENT_H__ */
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index c756c7cca7b69e6ab8e07353b66649564b0440f7..4d89c384931fe044fdc94ed69c169db7b8b00303 100644
@@ -44,9 +44,9 @@
  *  paused
  * VCPU_PAUSED in a response signals to unpause the vCPU
  */
-#define VM_EVENT_FLAG_VCPU_PAUSED       (1 << 0)
+#define VM_EVENT_FLAG_VCPU_PAUSED        (1 << 0)
 /* Flags to aid debugging vm_event */
-#define VM_EVENT_FLAG_FOREIGN           (1 << 1)
+#define VM_EVENT_FLAG_FOREIGN            (1 << 1)
 /*
  * The following flags can be set in response to a mem_access event.
  *
  * This will allow the guest to continue execution without lifting the page
  * access restrictions.
  */
-#define VM_EVENT_FLAG_EMULATE           (1 << 2)
+#define VM_EVENT_FLAG_EMULATE            (1 << 2)
 /*
- * Same as MEM_ACCESS_EMULATE, but with write operations or operations
+ * Same as VM_EVENT_FLAG_EMULATE, but with write operations or operations
  * potentially having side effects (like memory mapped or port I/O) disabled.
  */
-#define VM_EVENT_FLAG_EMULATE_NOWRITE   (1 << 3)
+#define VM_EVENT_FLAG_EMULATE_NOWRITE    (1 << 3)
 /*
  * Toggle singlestepping on vm_event response.
  * Requires the vCPU to be paused already (synchronous events only).
  */
-#define VM_EVENT_FLAG_TOGGLE_SINGLESTEP (1 << 4)
+#define VM_EVENT_FLAG_TOGGLE_SINGLESTEP  (1 << 4)
+/*
+ * Data is being sent back to the hypervisor in the event response, to be
+ * returned by the read function when emulating an instruction.
+ * This flag is only useful when combined with VM_EVENT_FLAG_EMULATE
+ * and takes precedence if combined with VM_EVENT_FLAG_EMULATE_NOWRITE
+ * (i.e. if both VM_EVENT_FLAG_EMULATE_NOWRITE and
+ * VM_EVENT_FLAG_SET_EMUL_READ_DATA are set, only the latter will be honored).
+ */
+#define VM_EVENT_FLAG_SET_EMUL_READ_DATA (1 << 5)
 
 /*
  * Reasons for the vm event request
@@ -194,6 +203,12 @@ struct vm_event_sharing {
     uint32_t _pad;
 };
 
+struct vm_event_emul_read_data {
+    uint32_t size;
+    /* The struct is used in a union with vm_event_regs_x86. */
+    uint8_t  data[sizeof(struct vm_event_regs_x86) - sizeof(uint32_t)];
+};
+
 typedef struct vm_event_st {
     uint32_t version;   /* VM_EVENT_INTERFACE_VERSION */
     uint32_t flags;     /* VM_EVENT_FLAG_* */
@@ -211,8 +226,12 @@ typedef struct vm_event_st {
     } u;
 
     union {
-        struct vm_event_regs_x86 x86;
-    } regs;
+        union {
+            struct vm_event_regs_x86 x86;
+        } regs;
+
+        struct vm_event_emul_read_data emul_read_data;
+    } data;
 } vm_event_request_t, vm_event_response_t;
 
 DEFINE_RING_TYPES(vm_event, vm_event_request_t, vm_event_response_t);
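
Two consumer-facing details worth noting (illustrative, not normative):
emul_read_data.size may be smaller than the read being emulated, since
set_context_data() copies min(read size, emul_read_data.size) bytes and
zero-fills the remainder; and because VM_EVENT_FLAG_SET_EMUL_READ_DATA
takes precedence over VM_EVENT_FLAG_EMULATE_NOWRITE, a response that sets
both still performs a full (write-capable) emulation, only with the
substituted read data, e.g.:

    /* Hypothetical response fragment: NOWRITE is ignored here, because
     * SET_EMUL_READ_DATA takes precedence (see the flag comment above). */
    rsp.flags |= VM_EVENT_FLAG_EMULATE_NOWRITE |
                 VM_EVENT_FLAG_SET_EMUL_READ_DATA;

    /* Only the first two bytes of the emulated read are controlled; any
     * remaining bytes are zero-filled by set_context_data(). */
    rsp.data.emul_read_data.size = 2;
    rsp.data.emul_read_data.data[0] = 0xde;
    rsp.data.emul_read_data.data[1] = 0xad;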