x86: force EFLAGS.IF on when exiting to PV guests
author     Jan Beulich <jbeulich@suse.com>
           Wed, 21 Dec 2016 16:47:08 +0000 (17:47 +0100)
committer  Jan Beulich <jbeulich@suse.com>
           Wed, 21 Dec 2016 16:47:08 +0000 (17:47 +0100)
Guest kernels modifying instructions while they are being emulated for
another of their vCPUs can cause EFLAGS.IF to be cleared upon the next
exit to guest context, by converting the instruction under emulation
into CLI (at the right point in time). Prevent any such bad effects by
always forcing EFLAGS.IF on. And to cover hypothetical other similar
issues, also force EFLAGS.{IOPL,NT,VM} to zero.

This is CVE-2016-10024 / XSA-202.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
master commit: 0e47f92b072548800223f9a21ea051a017173915
master date: 2016-12-21 16:46:13 +0100
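
For illustration, a minimal C sketch of the sanitization the patch performs
in assembly on both exit-to-PV-guest paths. The flag constants are the
architectural EFLAGS bit values; the standalone helper and its name are
hypothetical and not part of Xen:

    #include <stdint.h>

    #define X86_EFLAGS_IF    0x00000200u   /* interrupt enable (bit 9)        */
    #define X86_EFLAGS_IOPL  0x00003000u   /* I/O privilege level (bits 12-13) */
    #define X86_EFLAGS_NT    0x00004000u   /* nested task (bit 14)            */
    #define X86_EFLAGS_VM    0x00020000u   /* virtual-8086 mode (bit 17)      */

    /* Hypothetical helper, for illustration only: sanitize the saved guest
     * EFLAGS before returning to PV guest context. */
    static inline uint32_t sanitize_guest_eflags(uint32_t eflags)
    {
        /* Clear bits a PV guest must never control (the "hypothetical
         * other similar issues" mentioned above). */
        eflags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_VM);
        /* Force interrupts on, so rewriting an instruction under emulation
         * into CLI cannot leave the vCPU uninterruptible. */
        eflags |= X86_EFLAGS_IF;
        return eflags;
    }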

xen/arch/x86/x86_64/compat/entry.S
xen/arch/x86/x86_64/entry.S

diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index f69813ddba9986e317b7255efd025249ff2aa403..bced03735adaf00ba5e3d8f3cdb6315deb77e544 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -174,6 +174,8 @@ compat_bad_hypercall:
 /* %rbx: struct vcpu, interrupts disabled */
 ENTRY(compat_restore_all_guest)
         ASSERT_INTERRUPTS_DISABLED
+        mov   $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),%r11d
+        and   UREGS_eflags(%rsp),%r11d
 .Lcr4_orig:
         .skip .Lcr4_alt_end - .Lcr4_alt, 0x90
 .Lcr4_orig_end:
@@ -209,6 +211,8 @@ ENTRY(compat_restore_all_guest)
                              (.Lcr4_orig_end - .Lcr4_orig), \
                              (.Lcr4_alt_end - .Lcr4_alt)
         .popsection
+        or    $X86_EFLAGS_IF,%r11
+        mov   %r11d,UREGS_eflags(%rsp)
         RESTORE_ALL adj=8 compat=1
 .Lft0:  iretq
 
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index c6c0014638898a947e8fced2dff0fd15eabe0c90..d2c8e84c23a60cfbe7d0834fb03b2888d7091ec6 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -41,28 +41,29 @@ restore_all_guest:
         testw $TRAP_syscall,4(%rsp)
         jz    iret_exit_to_guest
 
+        movq  24(%rsp),%r11           # RFLAGS
+        andq  $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),%r11
+        orq   $X86_EFLAGS_IF,%r11
+
         /* Don't use SYSRET path if the return address is not canonical. */
         movq  8(%rsp),%rcx
         sarq  $47,%rcx
         incl  %ecx
         cmpl  $1,%ecx
-        ja    .Lforce_iret
+        movq  8(%rsp),%rcx            # RIP
+        ja    iret_exit_to_guest
 
         cmpw  $FLAT_USER_CS32,16(%rsp)# CS
-        movq  8(%rsp),%rcx            # RIP
-        movq  24(%rsp),%r11           # RFLAGS
         movq  32(%rsp),%rsp           # RSP
         je    1f
         sysretq
 1:      sysretl
 
-.Lforce_iret:
-        /* Mimic SYSRET behavior. */
-        movq  8(%rsp),%rcx            # RIP
-        movq  24(%rsp),%r11           # RFLAGS
         ALIGN
 /* No special register assumptions. */
 iret_exit_to_guest:
+        andl  $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),24(%rsp)
+        orl   $X86_EFLAGS_IF,24(%rsp)
         addq  $8,%rsp
 .Lft0:  iretq
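
For context, a self-contained C sketch of the canonical-address test guarding
the SYSRET path above (the sarq $47 / incl / cmpl $1 / ja sequence): an
arithmetic shift by 47 leaves 0 or -1 for a canonical return address, so the
incremented value must be 0 or 1; anything else falls back to
iret_exit_to_guest, since SYSRET to a non-canonical RIP would fault while
still on Xen's stack. The helper name and test addresses are illustrative
only:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper mirroring the assembly check above. */
    static bool rip_is_canonical(uint64_t rip)
    {
        uint64_t top = (uint64_t)((int64_t)rip >> 47); /* sign-propagating, like sarq */
        return top + 1 <= 1;                           /* canonical iff shift gave 0 or -1 */
    }

    int main(void)
    {
        printf("%d\n", rip_is_canonical(0x00007fffffffe000ull)); /* 1: user-space canonical   */
        printf("%d\n", rip_is_canonical(0xffff800000000000ull)); /* 1: kernel-space canonical */
        printf("%d\n", rip_is_canonical(0x0000800000000000ull)); /* 0: non-canonical          */
        return 0;
    }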