x86: temporarily disable SMAP to legally access user pages in kernel mode
author     Feng Wu <feng.wu@intel.com>
           Mon, 12 May 2014 15:02:25 +0000 (17:02 +0200)
committer  Jan Beulich <jbeulich@suse.com>
           Mon, 12 May 2014 15:02:25 +0000 (17:02 +0200)
Use STAC/CLAC to temporarily disable SMAP, so that legitimate accesses
to user pages can be made from kernel mode.

STAC/CLAC are not needed in compat_create_bounce_frame, since that code
only accesses the PV guest's kernel stack, which for 32-bit PV guests is
in ring 1 and hence on supervisor (non-user) mappings that SMAP does not
restrict.
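
In outline, the pattern the patch applies everywhere is: open an access
window with stac(), perform the user-page access, close the window again
with clac(). A minimal C sketch of that shape, using the stac()/clac()
helpers this patch relies on; fetch_guest_word() is invented for
illustration, and a real primitive would also carry an exception-table
fixup, as the hunks below do:

    static unsigned long fetch_guest_word(const unsigned long __user *gptr)
    {
        unsigned long val;

        stac();       /* set EFLAGS.AC: accesses to user pages now legal */
        val = *gptr;  /* the user-page access (no fixup in this sketch)  */
        clac();       /* clear EFLAGS.AC: SMAP is enforced again         */

        return val;
    }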

Signed-off-by: Feng Wu <feng.wu@intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
xen/arch/x86/traps.c
xen/arch/x86/usercopy.c
xen/arch/x86/x86_64/entry.S
xen/include/asm-x86/uaccess.h
xen/include/asm-x86/x86_64/system.h

diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index ac68a8540b312832b7209c605dda717a74460dcd..ba7d387afb2427f8e05c741466ef50924956f7ce 100644
@@ -3822,6 +3822,18 @@ unsigned long do_get_debugreg(int reg)
 
 void asm_domain_crash_synchronous(unsigned long addr)
 {
+    /*
+     * We need to clear the AC bit here because in entry.S AC is set
+     * by ASM_STAC to temporarily allow accesses to user pages, which
+     * SMAP prevents by default.
+     *
+     * On some of the code paths that reach this function clac() is
+     * not strictly needed, but doing it once here rather than at
+     * every asm_domain_crash_synchronous() call site reduces code
+     * redundancy, and it is harmless on those paths.
+     */
+    clac();
+
     if ( addr == 0 )
         addr = this_cpu(last_extable_addr);
 
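The clac()/stac() helpers themselves are defined elsewhere in the tree.
One plausible shape for them, assuming a runtime check of the SMAP CPU
feature (an assumption: the real Xen definitions may instead use
alternatives patching, or .byte opcode encodings for older assemblers):

    static inline void stac(void)
    {
        /* Assumed sketch, not the actual Xen definition. */
        if ( boot_cpu_has(X86_FEATURE_SMAP) )
            asm volatile ( "stac" : : : "memory" );
    }

    static inline void clac(void)
    {
        if ( boot_cpu_has(X86_FEATURE_SMAP) )
            asm volatile ( "clac" : : : "memory" );
    }
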
diff --git a/xen/arch/x86/usercopy.c b/xen/arch/x86/usercopy.c
index b79202bc98fe111cb8876e5c16d605e2e99a958a..4cc78f5968ed100002a226eda723e2df83f44191 100644
@@ -14,6 +14,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n)
 {
     unsigned long __d0, __d1, __d2, __n = n;
 
+    stac();
     asm volatile (
         "    cmp  $"STR(2*BYTES_PER_LONG-1)",%0\n"
         "    jbe  1f\n"
@@ -42,6 +43,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n)
         : "=&c" (__n), "=&D" (__d0), "=&S" (__d1), "=&r" (__d2)
         : "0" (__n), "1" (to), "2" (from), "3" (__n)
         : "memory" );
+    clac();
 
     return __n;
 }
@@ -51,6 +53,7 @@ __copy_from_user_ll(void *to, const void __user *from, unsigned n)
 {
     unsigned long __d0, __d1, __d2, __n = n;
 
+    stac();
     asm volatile (
         "    cmp  $"STR(2*BYTES_PER_LONG-1)",%0\n"
         "    jbe  1f\n"
@@ -85,6 +88,7 @@ __copy_from_user_ll(void *to, const void __user *from, unsigned n)
         : "=&c" (__n), "=&D" (__d0), "=&S" (__d1), "=&r" (__d2)
         : "0" (__n), "1" (to), "2" (from), "3" (__n)
         : "memory" );
+    clac();
 
     return __n;
 }
@@ -113,6 +117,7 @@ copy_to_user(void __user *to, const void *from, unsigned n)
 #define __do_clear_user(addr,size)                                     \
 do {                                                                   \
        long __d0;                                                      \
+       stac();                                                         \
        __asm__ __volatile__(                                           \
                "0:     rep; stosl\n"                                   \
                "       movl %2,%0\n"                                   \
@@ -126,6 +131,7 @@ do {                                                                        \
                _ASM_EXTABLE(1b,2b)                                     \
                : "=&c"(size), "=&D" (__d0)                             \
                : "r"(size & 3), "0"(size / 4), "1"((long)addr), "a"(0));       \
+       clac();                                                         \
 } while (0)
 
 /**
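
Note the return convention these routines share: the value handed back
is the number of bytes left uncopied, so any non-zero result means the
copy faulted part-way through. A hedged caller sketch (send_to_guest()
and its parameters are invented for illustration):

    static int send_to_guest(void __user *uaddr, const void *buf,
                             unsigned int len)
    {
        if ( copy_to_user(uaddr, buf, len) != 0 )
            return -EFAULT;  /* some tail of the buffer was never written */
        return 0;
    }
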
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 42f66bf8d5008270b7de129ad4837948a39bbe06..4796e6563b2b475787d3c4aef09ba825974c1680 100644
@@ -379,6 +379,7 @@ __UNLIKELY_END(create_bounce_frame_bad_sp)
         movb  TRAPBOUNCE_flags(%rdx),%cl
         subq  $40,%rsi
         movq  UREGS_ss+8(%rsp),%rax
+        ASM_STAC
 .Lft2:  movq  %rax,32(%rsi)             # SS
         movq  UREGS_rsp+8(%rsp),%rax
 .Lft3:  movq  %rax,24(%rsi)             # RSP
@@ -423,6 +424,7 @@ UNLIKELY_END(bounce_failsafe)
 .Lft12: movq  %rax,8(%rsi)              # R11
         movq  UREGS_rcx+8(%rsp),%rax
 .Lft13: movq  %rax,(%rsi)               # RCX
+        ASM_CLAC
         /* Rewrite our stack frame and return to guest-OS mode. */
         /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
         /* Also clear AC: alignment checks shouldn't trigger in kernel mode. */
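
Everything between ASM_STAC and ASM_CLAC here writes the bounce frame
onto the guest kernel stack; since 64-bit PV kernels run in ring 3, that
stack lives on user pages and each store would otherwise fault under
SMAP. A hedged C rendering of the bracketed region (illustration only;
the real logic is the assembly above, and write_bounce_frame() with its
types is invented):

    struct bounce_state { uint64_t ss, rsp, rflags, cs, rip; };

    static void write_bounce_frame(uint64_t *gstack,
                                   const struct bounce_state *s)
    {
        stac();                  /* ASM_STAC: open the user-access window */
        gstack[4] = s->ss;       /* .Lft2: SS */
        gstack[3] = s->rsp;      /* .Lft3: RSP */
        gstack[2] = s->rflags;
        gstack[1] = s->cs;
        gstack[0] = s->rip;      /* simplified: the real frame stores more */
        clac();                  /* ASM_CLAC: close the window */
    }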
diff --git a/xen/include/asm-x86/uaccess.h b/xen/include/asm-x86/uaccess.h
index 88b4ba2b7d930f4288c312956643dd34edc6b8bf..947470d9dfaf81d7c675eb29df24feb01737dc3b 100644
@@ -146,6 +146,7 @@ struct __large_struct { unsigned long buf[100]; };
  * aliasing issues.
  */
 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
+       stac();                                                         \
        __asm__ __volatile__(                                           \
                "1:     mov"itype" %"rtype"1,%2\n"                      \
                "2:\n"                                                  \
@@ -155,9 +156,11 @@ struct __large_struct { unsigned long buf[100]; };
                ".previous\n"                                           \
                _ASM_EXTABLE(1b, 3b)                                    \
                : "=r"(err)                                             \
-               : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err))
+               : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err));    \
+       clac()
 
 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
+       stac();                                                         \
        __asm__ __volatile__(                                           \
                "1:     mov"itype" %2,%"rtype"1\n"                      \
                "2:\n"                                                  \
@@ -168,7 +171,8 @@ struct __large_struct { unsigned long buf[100]; };
                ".previous\n"                                           \
                _ASM_EXTABLE(1b, 3b)                                    \
                : "=r"(err), ltype (x)                                  \
-               : "m"(__m(addr)), "i"(errret), "0"(err))
+               : "m"(__m(addr)), "i"(errret), "0"(err));               \
+       clac()
 
 /**
  * __copy_to_user: - Copy a block of data into user space, with less checking
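
With stac()/clac() folded into __put_user_asm() and __get_user_asm(),
callers of the __put_user()/__get_user() wrappers need no SMAP handling
of their own. A hedged usage sketch (update_guest_counter() and uptr are
invented for illustration):

    static int update_guest_counter(unsigned long __user *uptr)
    {
        unsigned long val;

        if ( __get_user(val, uptr) )       /* non-zero on a faulting read */
            return -EFAULT;
        return __put_user(val + 1, uptr);  /* 0 on success */
    }
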
diff --git a/xen/include/asm-x86/x86_64/system.h b/xen/include/asm-x86/x86_64/system.h
index 20f038bf9490d360d729873ef601f14a04c99ed5..662813ad8c3376395943ef035b3cb58e91ea241f 100644
@@ -12,6 +12,7 @@
  * is the same as the initial value of _o then _n is written to location _p.
  */
 #define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype)                 \
+    stac();                                                             \
     asm volatile (                                                      \
         "1: lock; cmpxchg"_isuff" %"_oppre"2,%3\n"                      \
         "2:\n"                                                          \
@@ -22,7 +23,8 @@
         _ASM_EXTABLE(1b, 3b)                                            \
         : "=a" (_o), "=r" (_rc)                                         \
         : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
-        : "memory");
+        : "memory");                                                    \
+    clac()
 
 #define cmpxchg_user(_p,_o,_n)                                          \
 ({                                                                      \
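
The cmpxchg_user() wrapper built on this returns non-zero if the user
access faults, and (via the "=a" output above) writes the value actually
found at the location back into its second argument, so the caller can
tell whether the exchange took effect. A hedged usage sketch
(set_guest_flag() is invented for illustration):

    static int set_guest_flag(unsigned long __user *uptr,
                              unsigned long newval)
    {
        unsigned long old = 0;   /* we expect the flag to start out clear */

        if ( cmpxchg_user(uptr, old, newval) != 0 )
            return -EFAULT;      /* the user page faulted */

        /* "old" now holds what was really there; the exchange only
         * happened if that matched our expectation of 0. */
        return (old == 0) ? 0 : -EBUSY;
    }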