x86/vm_event: allow overwriting Xen's i-cache used for emulation
author    Tamas K Lengyel <tamas.lengyel@zentific.com>
          Mon, 26 Sep 2016 16:04:11 +0000 (18:04 +0200)
committer Jan Beulich <jbeulich@suse.com>
          Mon, 26 Sep 2016 16:04:11 +0000 (18:04 +0200)
When emulating instructions, Xen's emulator maintains a small i-cache fetched
from guest memory. This patch extends the vm_event interface to allow
overwriting this i-cache via a buffer returned in the vm_event response.

When responding to a SOFTWARE_BREAKPOINT event (INT3), the monitor subscriber
normally has to remove the INT3 from memory, singlestep, and then place the
INT3 back to allow the guest to continue execution. This routine, however, is
susceptible to a race condition on multi-vCPU guests. By allowing the
subscriber to return the i-cache buffer to be used for emulation, it can
side-step the problem by returning a clean buffer without the INT3 present.
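
For illustration only, here is a minimal sketch of what such a subscriber-side
response might look like. This is not part of the patch: the helper name and
the orig_insn argument (the original bytes saved when the breakpoint was
planted), as well as the surrounding ring handling, are assumptions in the
style of the usual vm_event consumers.

#include <string.h>
#include <xenctrl.h>
#include <xen/vm_event.h>

/*
 * Illustrative sketch: answer an INT3 vm_event with a clean i-cache buffer
 * so Xen emulates the original instruction instead of fetching the INT3
 * from guest memory.
 */
static void respond_int3_with_clean_insn(const vm_event_request_t *req,
                                         vm_event_response_t *rsp,
                                         const uint8_t orig_insn[16])
{
    memset(rsp, 0, sizeof(*rsp));
    rsp->version = VM_EVENT_INTERFACE_VERSION;
    rsp->vcpu_id = req->vcpu_id;
    rsp->reason  = req->reason; /* VM_EVENT_REASON_SOFTWARE_BREAKPOINT */

    /* Keep the vCPU-paused flag and request emulation from our buffer. */
    rsp->flags = (req->flags & VM_EVENT_FLAG_VCPU_PAUSED) |
                 VM_EVENT_FLAG_EMULATE | VM_EVENT_FLAG_SET_EMUL_INSN_DATA;

    /* The 16-byte buffer has to be completely filled. */
    memcpy(rsp->data.emul.insn.data, orig_insn,
           sizeof(rsp->data.emul.insn.data));
}

With this approach the breakpoint never needs to be removed from guest memory,
so no other vCPU can slip past it while it is absent.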

As part of this patch we rename hvm_mem_access_emulate_one to
hvm_emulate_one_vm_event to better reflect that it is now used in various
vm_event scenarios, not just in response to mem_access events.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>
Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
xen/arch/x86/hvm/emulate.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/vm_event.c
xen/include/asm-x86/hvm/emulate.h
xen/include/asm-x86/vm_event.h
xen/include/public/vm_event.h

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index cc25676c7416e7d86290759701e34d6458051dd4..17f7f0d3c34553efbc4e3a3c0312d07d99183c1d 100644
@@ -76,9 +76,9 @@ static int set_context_data(void *buffer, unsigned int size)
     if ( curr->arch.vm_event )
     {
         unsigned int safe_size =
-            min(size, curr->arch.vm_event->emul_read_data.size);
+            min(size, curr->arch.vm_event->emul.read.size);
 
-        memcpy(buffer, curr->arch.vm_event->emul_read_data.data, safe_size);
+        memcpy(buffer, curr->arch.vm_event->emul.read.data, safe_size);
         memset(buffer + safe_size, 0, size - safe_size);
         return X86EMUL_OKAY;
     }
@@ -1931,7 +1931,7 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
     return rc;
 }
 
-void hvm_mem_access_emulate_one(enum emul_kind kind, unsigned int trapnr,
+void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
     unsigned int errcode)
 {
     struct hvm_emulate_ctxt ctx = {{ 0 }};
@@ -1944,10 +1944,25 @@ void hvm_mem_access_emulate_one(enum emul_kind kind, unsigned int trapnr,
     case EMUL_KIND_NOWRITE:
         rc = hvm_emulate_one_no_write(&ctx);
         break;
-    case EMUL_KIND_SET_CONTEXT:
-        ctx.set_context = 1;
-        /* Intentional fall-through. */
+    case EMUL_KIND_SET_CONTEXT_INSN: {
+        struct vcpu *curr = current;
+        struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+
+        BUILD_BUG_ON(sizeof(vio->mmio_insn) !=
+                     sizeof(curr->arch.vm_event->emul.insn.data));
+        ASSERT(!vio->mmio_insn_bytes);
+
+        /*
+         * Stash insn buffer into mmio buffer here instead of ctx
+         * to avoid having to add more logic to hvm_emulate_one.
+         */
+        vio->mmio_insn_bytes = sizeof(vio->mmio_insn);
+        memcpy(vio->mmio_insn, curr->arch.vm_event->emul.insn.data,
+               vio->mmio_insn_bytes);
+    }
+    /* Fall-through */
     default:
+        ctx.set_context = (kind == EMUL_KIND_SET_CONTEXT_DATA);
         rc = hvm_emulate_one(&ctx);
     }
 
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 73730d1fe0225cfa84dfdf508049d48144fa9741..34a8c858c76ddfc24d2f5dd5ff5368441533d4fd 100644
@@ -487,15 +487,22 @@ void hvm_do_resume(struct vcpu *v)
         {
             enum emul_kind kind = EMUL_KIND_NORMAL;
 
+            /*
+             * Please observe the order here to match the flag descriptions
+             * provided in public/vm_event.h
+             */
             if ( v->arch.vm_event->emulate_flags &
                  VM_EVENT_FLAG_SET_EMUL_READ_DATA )
-                kind = EMUL_KIND_SET_CONTEXT;
+                kind = EMUL_KIND_SET_CONTEXT_DATA;
             else if ( v->arch.vm_event->emulate_flags &
                       VM_EVENT_FLAG_EMULATE_NOWRITE )
                 kind = EMUL_KIND_NOWRITE;
+            else if ( v->arch.vm_event->emulate_flags &
+                      VM_EVENT_FLAG_SET_EMUL_INSN_DATA )
+                kind = EMUL_KIND_SET_CONTEXT_INSN;
 
-            hvm_mem_access_emulate_one(kind, TRAP_invalid_op,
-                                       HVM_DELIVER_NO_ERROR_CODE);
+            hvm_emulate_one_vm_event(kind, TRAP_invalid_op,
+                                     HVM_DELIVER_NO_ERROR_CODE);
 
             v->arch.vm_event->emulate_flags = 0;
         }
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index 343b9c8936728880f4646442992ec7e8b5412254..1e88d675d6084d2fd900db76cb4ca2a1a4e83fe8 100644
@@ -209,11 +209,20 @@ void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
         if ( p2m_mem_access_emulate_check(v, rsp) )
         {
             if ( rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA )
-                v->arch.vm_event->emul_read_data = rsp->data.emul_read_data;
+                v->arch.vm_event->emul.read = rsp->data.emul.read;
 
             v->arch.vm_event->emulate_flags = rsp->flags;
         }
         break;
+
+    case VM_EVENT_REASON_SOFTWARE_BREAKPOINT:
+        if ( rsp->flags & VM_EVENT_FLAG_SET_EMUL_INSN_DATA )
+        {
+            v->arch.vm_event->emul.insn = rsp->data.emul.insn;
+            v->arch.vm_event->emulate_flags = rsp->flags;
+        }
+        break;
+
     default:
         break;
     };
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index 3aabcbeeccc6894f714c3bd1ad8a721bfbe916c4..96d8f0bcc5f38463dae801a51bdf5cbdda9e99b6 100644
@@ -40,14 +40,15 @@ struct hvm_emulate_ctxt {
 enum emul_kind {
     EMUL_KIND_NORMAL,
     EMUL_KIND_NOWRITE,
-    EMUL_KIND_SET_CONTEXT
+    EMUL_KIND_SET_CONTEXT_DATA,
+    EMUL_KIND_SET_CONTEXT_INSN
 };
 
 int hvm_emulate_one(
     struct hvm_emulate_ctxt *hvmemul_ctxt);
 int hvm_emulate_one_no_write(
     struct hvm_emulate_ctxt *hvmemul_ctxt);
-void hvm_mem_access_emulate_one(enum emul_kind kind,
+void hvm_emulate_one_vm_event(enum emul_kind kind,
     unsigned int trapnr,
     unsigned int errcode);
 void hvm_emulate_prepare(
diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h
index ebb5d88ad0e5640bcce1900f53fcdeb6b9d5c59b..ca73f99dcd65ac28c25c1dac7043ed198059cb6a 100644
  */
 struct arch_vm_event {
     uint32_t emulate_flags;
-    struct vm_event_emul_read_data emul_read_data;
+    union {
+        struct vm_event_emul_read_data read;
+        struct vm_event_emul_insn_data insn;
+    } emul;
     struct monitor_write_data write_data;
 };
 
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index f756126f2f6debc4a56e6ab7e0224ea726fd54b9..ba8e3873dd32bbb59f97d02fa9feebb3a66ad4c0 100644
  * Requires the vCPU to be paused already (synchronous events only).
  */
 #define VM_EVENT_FLAG_SET_REGISTERS      (1 << 8)
+/*
+ * Instruction cache is being sent back to the hypervisor in the event response
+ * to be used by the emulator. This flag is only useful when combined with
+ * VM_EVENT_FLAG_EMULATE and does not take precedence if combined with
+ * VM_EVENT_FLAG_EMULATE_NOWRITE or VM_EVENT_FLAG_SET_EMUL_READ_DATA (i.e.
+ * if any of those flags are set, only those will be honored).
+ */
+#define VM_EVENT_FLAG_SET_EMUL_INSN_DATA (1 << 9)
 
 /*
  * Reasons for the vm event request
@@ -265,6 +273,10 @@ struct vm_event_emul_read_data {
     uint8_t  data[sizeof(struct vm_event_regs_x86) - sizeof(uint32_t)];
 };
 
+struct vm_event_emul_insn_data {
+    uint8_t data[16]; /* Has to be completely filled */
+};
+
 typedef struct vm_event_st {
     uint32_t version;   /* VM_EVENT_INTERFACE_VERSION */
     uint32_t flags;     /* VM_EVENT_FLAG_* */
@@ -291,7 +303,10 @@ typedef struct vm_event_st {
             struct vm_event_regs_arm arm;
         } regs;
 
-        struct vm_event_emul_read_data emul_read_data;
+        union {
+            struct vm_event_emul_read_data read;
+            struct vm_event_emul_insn_data insn;
+        } emul;
     } data;
 } vm_event_request_t, vm_event_response_t;