void asm_domain_crash_synchronous(unsigned long addr)
{
+ /*
+ * We need to clear the AC bit here because in entry.S AC is set
+ * by ASM_STAC to temporarily allow accesses to user pages, which
+ * SMAP otherwise prevents.
+ *
+ * On some paths that reach this function, clac() is not strictly
+ * needed, but doing it here rather than at every call site of
+ * asm_domain_crash_synchronous() avoids duplicating it, and an
+ * extra clac() is harmless.
+ */
+ clac();
+
if ( addr == 0 )
addr = this_cpu(last_extable_addr);
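For reference, a minimal sketch of what the stac()/clac() helpers used throughout this patch might look like. This is an illustration, not the patch's actual definition: the real helpers may use alternatives-based instruction patching, and cpu_has_smap stands in for whatever feature test is actually used.

    /* Sketch only: guard the STAC/CLAC instructions (which need SMAP
     * support in both CPU and assembler) behind an assumed feature test. */
    static inline void stac(void)
    {
        if ( cpu_has_smap )
            asm volatile ( "stac" ::: "memory" );  /* set EFLAGS.AC */
    }

    static inline void clac(void)
    {
        if ( cpu_has_smap )
            asm volatile ( "clac" ::: "memory" );  /* clear EFLAGS.AC */
    }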
{
unsigned long __d0, __d1, __d2, __n = n;
+ stac();
asm volatile (
" cmp $"STR(2*BYTES_PER_LONG-1)",%0\n"
" jbe 1f\n"
: "=&c" (__n), "=&D" (__d0), "=&S" (__d1), "=&r" (__d2)
: "0" (__n), "1" (to), "2" (from), "3" (__n)
: "memory" );
+ clac();
return __n;
}
{
unsigned long __d0, __d1, __d2, __n = n;
+ stac();
asm volatile (
" cmp $"STR(2*BYTES_PER_LONG-1)",%0\n"
" jbe 1f\n"
: "=&c" (__n), "=&D" (__d0), "=&S" (__d1), "=&r" (__d2)
: "0" (__n), "1" (to), "2" (from), "3" (__n)
: "memory" );
+ clac();
return __n;
}
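Both hunks above follow the same pattern: stac() opens the user-access window immediately before the faulting asm block and clac() closes it immediately after, so EFLAGS.AC stays set for the shortest possible span. A hypothetical caller (buffer names assumed for illustration):

    /* Hypothetical caller: only the copy itself runs with EFLAGS.AC set;
     * the surrounding logic keeps full SMAP protection. */
    if ( __copy_to_user(guest_buf, &val, sizeof(val)) )
        return -EFAULT;  /* nonzero means some bytes were not copied */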
#define __do_clear_user(addr,size) \
do { \
long __d0; \
+ stac(); \
__asm__ __volatile__( \
"0: rep; stosl\n" \
" movl %2,%0\n" \
_ASM_EXTABLE(1b,2b) \
: "=&c"(size), "=&D" (__d0) \
: "r"(size & 3), "0"(size / 4), "1"((long)addr), "a"(0)); \
+ clac(); \
} while (0)
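For context, __do_clear_user() is the worker behind a clear_user()-style wrapper; a sketch of such a wrapper, assuming the usual access_ok() range check:

    /* Sketch of the assumed wrapper shape: returns the number of bytes
     * that could not be cleared, i.e. 0 on full success. */
    unsigned long clear_user(void __user *to, unsigned long n)
    {
        if ( access_ok(to, n) )
            __do_clear_user(to, n);  /* updates n to the bytes left */
        return n;
    }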
/**
movb TRAPBOUNCE_flags(%rdx),%cl
subq $40,%rsi
movq UREGS_ss+8(%rsp),%rax
+ ASM_STAC
.Lft2: movq %rax,32(%rsi) # SS
movq UREGS_rsp+8(%rsp),%rax
.Lft3: movq %rax,24(%rsi) # RSP
.Lft12: movq %rax,8(%rsi) # R11
movq UREGS_rcx+8(%rsp),%rax
.Lft13: movq %rax,(%rsi) # RCX
+ ASM_CLAC
/* Rewrite our stack frame and return to guest-OS mode. */
/* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
/* Also clear AC: alignment checks shouldn't trigger in kernel mode. */
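In C terms, the masking these comments describe amounts to the following sketch; the real code performs it in entry.S on the saved guest frame, and regs is a hypothetical name here:

    /* Clear the trap-sensitive flags, now including AC, in the saved
     * EFLAGS before returning to the guest. */
    regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_VM | X86_EFLAGS_RF |
                      X86_EFLAGS_NT | X86_EFLAGS_AC);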
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+ stac(); \
__asm__ __volatile__( \
"1: mov"itype" %"rtype"1,%2\n" \
"2:\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r"(err) \
- : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err))
+ : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err)); \
+ clac()
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+ stac(); \
__asm__ __volatile__( \
"1: mov"itype" %2,%"rtype"1\n" \
"2:\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r"(err), ltype (x) \
- : "m"(__m(addr)), "i"(errret), "0"(err))
+ : "m"(__m(addr)), "i"(errret), "0"(err)); \
+ clac()
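Both accessor macros now bracket their single faulting instruction with stac()/clac(). For illustration, a hypothetical expansion site for a 32-bit store; the "l"/"k"/"ir" width and constraint strings follow the usual convention for these macros but are assumptions here, as are val and uptr:

    int err = 0;
    /* Hypothetical 4-byte put: store val through the user pointer uptr,
     * leaving -EFAULT in err if the store faults. */
    __put_user_asm(val, uptr, err, "l", "k", "ir", -EFAULT);
    if ( err )
        return err;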
/**
* __copy_to_user: - Copy a block of data into user space, with less checking
* is the same as the initial value of _o then _n is written to location _p.
*/
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
+ stac(); \
asm volatile ( \
"1: lock; cmpxchg"_isuff" %"_oppre"2,%3\n" \
"2:\n" \
_ASM_EXTABLE(1b, 3b) \
: "=a" (_o), "=r" (_rc) \
: _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
- : "memory");
+ : "memory"); \
+ clac()
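As with the accessors above, the locked cmpxchg is the only instruction that executes with the SMAP window open. A hypothetical use through the cmpxchg_user() wrapper defined next (variable names assumed):

    /* Hypothetical caller: atomically replace *guest_word with new_val
     * if it still holds old_val; rc is -EFAULT if the access faults. */
    int rc = cmpxchg_user(guest_word, old_val, new_val);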
#define cmpxchg_user(_p,_o,_n) \
({ \