xenbits.xensource.com Git - xen.git/commitdiff
x86/HVM: re-order operations in hvm_ud_intercept()
author Jan Beulich <jbeulich@suse.com>
Fri, 17 Jun 2016 14:50:37 +0000 (16:50 +0200)
committer Jan Beulich <jbeulich@suse.com>
Fri, 17 Jun 2016 14:50:37 +0000 (16:50 +0200)
Don't fetch CS explicitly, leverage the fact that hvm_emulate_prepare()
already does (and that hvm_virtual_to_linear_addr() doesn't alter it).

At once increase the length passed to hvm_virtual_to_linear_addr() by
one: There definitely needs to be at least one more opcode byte, and we
can avoid missing a wraparound case this way.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
xen/arch/x86/hvm/hvm.c

index 78db903e1c175a0371c88a9db4d21f9e55f64fb0..22f045e082f468f2da59e0131d72ff87aac8f9a9 100644 (file)
@@ -3886,19 +3886,27 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
 {
     struct hvm_emulate_ctxt ctxt;
 
+    hvm_emulate_prepare(&ctxt, regs);
+
     if ( opt_hvm_fep )
     {
         struct vcpu *cur = current;
-        struct segment_register cs;
+        const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
         unsigned long addr;
         char sig[5]; /* ud2; .ascii "xen" */
 
-        hvm_get_segment_register(cur, x86_seg_cs, &cs);
-        if ( hvm_virtual_to_linear_addr(x86_seg_cs, &cs, regs->eip,
-                                        sizeof(sig), hvm_access_insn_fetch,
+        /*
+         * Note that in the call below we pass 1 more than the signature
+         * size, to guard against the overall code sequence wrapping between
+         * "prefix" and actual instruction. There's necessarily at least one
+         * actual instruction byte required, so this won't cause failure on
+         * legitimate uses.
+         */
+        if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->eip,
+                                        sizeof(sig) + 1, hvm_access_insn_fetch,
                                         (hvm_long_mode_enabled(cur) &&
-                                         cs.attr.fields.l) ? 64 :
-                                        cs.attr.fields.db ? 32 : 16, &addr) &&
+                                         cs->attr.fields.l) ? 64 :
+                                        cs->attr.fields.db ? 32 : 16, &addr) &&
              (hvm_fetch_from_guest_virt_nofault(sig, addr, sizeof(sig),
                                                 0) == HVMCOPY_okay) &&
              (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
@@ -3908,8 +3916,6 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
         }
     }
 
-    hvm_emulate_prepare(&ctxt, regs);
-
     switch ( hvm_emulate_one(&ctxt) )
     {
     case X86EMUL_UNHANDLEABLE: