#include <arch/idt.h>
+#include <arch/page.h>
#include <arch/processor.h>
#include <arch/segment.h>
#include <xtf/asm_macros.h>
ENTRY(entry_ret_to_kernel) /* int $X86_VEC_RET2KERN */
-
- /* User required to ensure this is called from CPL > KERNEL_RPL */
-
- mov 0*4(%esp), %ecx /* Stash %eip from iret frame */
- mov 3*4(%esp), %esp /* Load %esp from iret frame */
- jmp *%ecx /* Jump back */
-
+ mov %ebp, %esp /* Restore %esp to exec_user_param()'s context. */
+ ret
ENDFUNC(entry_ret_to_kernel)
ENTRY(exec_user_param)
 /*
  * 2*4(%esp) ulong p1
  * 1*4(%esp) ulong (*fn)(ulong)
* 0*4(%esp) return address
*/
+ push %ebp
+
+ /* Prepare to "call" exec_user_stub(). */
+ mov (1+1)*4(%esp), %eax /* Pass fn() in %eax */
+ mov (1+2)*4(%esp), %ecx /* Pass p1 in %ecx */
+ push $1f /* Fake return addr as if we'd called exec_user_stub(). */
+ mov %esp, %ebp /* Stash %esp for entry_ret_to_kernel(). */
+ /* Prepare an IRET frame. */
push $__USER_DS /* SS */
- push %esp
- addl $4, (%esp) /* ESP */
+ push $user_stack + PAGE_SIZE /* ESP */
pushf /* EFLAGS */
#if defined(CONFIG_PV) /* PV guests see the real interrupt flag. Clobber it. */
andl $~X86_EFLAGS_IF, (%esp)
#endif
push $__USER_CS /* CS */
- push $1f /* EIP */
+ push $exec_user_stub /* EIP */
env_IRET /* Drop to user privilege. */
-1:
- push 2*4(%esp) /* Re-push p1 for fn()'s call frame. */
- call *(1+1)*4(%esp) /* fn(p1) */
- add $1*4, %esp /* Pop p1. */
- int $X86_VEC_RET2KERN /* Return to kernel privilege. */
+1: /* entry_ret_to_kernel() returns here with a sensible stack. */
+ pop %ebp
ret
ENDFUNC(exec_user_param)
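For reference, a sketch of the kernel stack this function builds just before env_IRET, derived from the pushes above (offsets in 4-byte slots from the final %esp):

    /*
     *  5*4  $1f (fake return address)  <- %ebp snapshot points here
     *  4*4  SS     = __USER_DS
     *  3*4  ESP    = user_stack + PAGE_SIZE
     *  2*4  EFLAGS (IF clobbered for PV)
     *  1*4  CS     = __USER_CS
     *  0*4  EIP    = exec_user_stub
     *
     * entry_ret_to_kernel() reloads %esp from the %ebp snapshot, so its
     * `ret` consumes the fake return address and lands at the 1: label.
     */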
+.pushsection .text.user, "ax", @progbits
+ENTRY(exec_user_stub)
+ /*
+ * For SMEP/SMAP safety, no shared stack can be used, so all
+ * parameters are passed in registers.
+ */
+ push %ecx /* Push p1 for fn()'s call frame. */
+ call *%eax /* fn(p1) */
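+ /* No need to pop p1: entry_ret_to_kernel() abandons this stack entirely. */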
+
+ int $X86_VEC_RET2KERN /* Return to kernel privilege. */
+ENDFUNC(exec_user_stub)
+.popsection
+
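Anything executed at user privilege must be fetched from a _PAGE_USER mapping, which is exactly what .text.user provides. If a test needed a C helper in the same section, the standard GCC/Clang section attribute would do; a hedged sketch (the function is hypothetical, not part of this patch):

    /* Hypothetical C helper located alongside exec_user_stub. */
    static unsigned long __attribute__((section(".text.user")))
    user_leaf(unsigned long arg)
    {
        return arg ^ 0x55;
    }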
/*
* Local variables:
* tab-width: 8
#include <arch/idt.h>
+#include <arch/page.h>
#include <arch/processor.h>
#include <arch/segment.h>
#include <xtf/asm_macros.h>
ENTRY(entry_ret_to_kernel) /* int $X86_VEC_RET2KERN */
env_ADJUST_FRAME
- mov 0*8(%rsp), %rdi /* Stash %rip from iret frame */
- mov 3*8(%rsp), %rsp /* Load %esp from iret frame */
- jmp *%rdi /* Jump back */
-
+ mov %rbp, %rsp /* Restore %rsp to exec_user_param()'s context. */
+ ret
ENDFUNC(entry_ret_to_kernel)
ENTRY(exec_user_param) /* ulong (*fn)(ulong), ulong p1 */
+ push %rbp
+
+ /* Prepare to "call" exec_user_stub(). */
+ push $1f /* Fake return addr as if we'd called exec_user_stub(). */
+ mov %rsp, %rbp /* Stash %rsp for entry_ret_to_kernel(). */
+ /* Prepare an IRET frame. */
push $__USER_DS /* SS */
- push %rsp
- addq $8, (%rsp) /* RSP */
+ push $user_stack + PAGE_SIZE /* RSP */
pushf /* RFLAGS */
#if defined(CONFIG_PV) /* PV guests see the real interrupt flag. Clobber it. */
andl $~X86_EFLAGS_IF, (%rsp)
#endif
push $__USER_CS /* CS */
- push $1f /* RIP */
+ push $exec_user_stub /* RIP */
env_IRETQ /* Drop to user privilege. */
-1:
- xchg %rdi, %rsi /* Swap p1 to be first parameter to fn(). */
- call *%rsi /* fn(p1) */
- int $X86_VEC_RET2KERN /* Return to kernel privilege. */
+1: /* entry_ret_to_kernel() returns here with a sensible stack. */
+ pop %rbp
ret
ENDFUNC(exec_user_param)
+.pushsection .text.user, "ax", @progbits
+ENTRY(exec_user_stub)
+ xchg %rdi, %rsi /* Swap p1 to be first parameter to fn(). */
+ call *%rsi /* fn(p1) */
+
+ int $X86_VEC_RET2KERN /* Return to kernel privilege. */
+ENDFUNC(exec_user_stub)
+.popsection
+
/*
* Local variables:
* tab-width: 8
l1_identmap[gfn] |= _PAGE_USER;
+ extern const char __start_user_text[], __end_user_text[];
+ unsigned long end = virt_to_gfn(__end_user_text);
+
+ if ( end >= ARRAY_SIZE(l1_identmap) )
+ panic("__{start,end}_user_text[] outside of l1_identmap[]\n");
+
+ for ( gfn = virt_to_gfn(__start_user_text); gfn < end; ++gfn )
+ l1_identmap[gfn] |= _PAGE_USER;
+
write_cr3((unsigned long)&cr3_target);
}
}
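The gfn arithmetic above leans on XTF's identity-mapped HVM layout, where virt_to_gfn() reduces to a page shift. A sketch of the invariant being relied on (assuming PAGE_SHIFT == 12; illustration only, not code from the patch):

    unsigned long start = (unsigned long)__start_user_text >> PAGE_SHIFT;
    unsigned long end   = (unsigned long)__end_user_text   >> PAGE_SHIFT;

    /* Both symbols are PAGE_SIZE-aligned by the linker script, so
     * [start, end) names exactly the pages of .text.user, and
     * l1_identmap[g] is the l1e mapping gfn g. */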
.text : {
*(.text)
+
+ . = ALIGN(PAGE_SIZE);
+ __start_user_text = .;
+ *(.text.user)
+ . = ALIGN(PAGE_SIZE);
+ __end_user_text = .;
+
} :load = 0
.data : {
ASSERT(IS_ALIGNED(boot_stack, PAGE_SIZE), "boot_stack misaligned");
ASSERT(IS_ALIGNED(user_stack, PAGE_SIZE), "user_stack misaligned");
+ASSERT(IS_ALIGNED(__start_user_text, PAGE_SIZE), "__start_user_text misaligned");
+ASSERT(IS_ALIGNED(__end_user_text, PAGE_SIZE), "__end_user_text misaligned");
+
#ifdef CONFIG_HVM
ASSERT(IS_ALIGNED(pae_l1_identmap, PAGE_SIZE), "pae_l1_identmap misaligned");
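A side note on the extern declarations in the C hunks: linker-provided symbols denote addresses, not objects, so declaring them as incomplete const char arrays keeps pointer arithmetic natural and guards against accidental dereference. A small sketch (the helper is illustrative):

    extern const char __start_user_text[], __end_user_text[];

    /* Hypothetical helper: page count of .text.user. */
    static inline unsigned long user_text_pages(void)
    {
        return (__end_user_text - __start_user_text) >> PAGE_SHIFT;
    }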
if ( hypercall_update_va_mapping(user_stack, nl1e, UVMF_INVLPG) )
panic("Unable to remap user_stack with _PAGE_USER\n");
+
+ extern const char __start_user_text[], __end_user_text[];
+ unsigned long va = (unsigned long)__start_user_text;
+
+ while ( va < (unsigned long)__end_user_text )
+ {
+ nl1e = pte_from_virt(_p(va), PF_SYM(AD, U, RW, P));
+
+ if ( hypercall_update_va_mapping(_p(va), nl1e, UVMF_INVLPG) )
+ panic("Unable to remap user_text with _PAGE_USER\n");
+
+ va += PAGE_SIZE;
+ }
}
#endif
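For readers unfamiliar with XTF's flag shorthand: PF_SYM(AD, U, RW, P) composes symbolic PTE flags into one mask. Roughly, assuming the usual x86 flag names (with AD folding Accessed and Dirty together):

    /* Approximate expansion of PF_SYM(AD, U, RW, P): */
    _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_USER | _PAGE_RW | _PAGE_PRESENT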
/**
* Execute fn(p1) at user privilege, passing its return value back.
+ *
+ * fn() executes on user_stack[] and is therefore non-reentrant.
*/
unsigned long exec_user_param(unsigned long (*fn)(unsigned long),
unsigned long p1);
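A minimal usage sketch, assuming the test has opted into user mappings (test_wants_user_mappings) so its own code is reachable from CPL3; the function below is made up:

    static unsigned long add_one(unsigned long arg)
    {
        return arg + 1; /* Runs at user privilege on user_stack[]. */
    }

    /* In a test: */
    unsigned long ret = exec_user_param(add_one, 41); /* ret == 42 */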
+#include <arch/page.h>
#include <arch/processor.h>
#include <arch/segment.h>
#include <xtf/asm_macros.h>
ENTRY(exec_user_with_iopl) /* void (*fn)(void), unsigned int iopl */
+ push %_ASM_BP
- push $__USER_DS /* SS */
+ /* Prepare to "call" exec_user_stub(). */
#ifdef __i386__
- push %esp
- addl $4, (%esp) /* ESP */
-#else
- push %rsp
- addq $8, (%rsp)
+ mov (1+1)*4(%esp), %eax /* Pass fn() in %eax */
#endif
+ push $1f /* Fake return addr as if we'd called exec_user_stub(). */
+ mov %_ASM_SP, %_ASM_BP /* Stash %esp for entry_ret_to_kernel(). */
+
+ /* Prepare an IRET frame. */
+ push $__USER_DS /* SS */
+ push $user_stack + PAGE_SIZE /* ESP */
pushf /* EFLAGS */
/* PV guests see the real interrupt flag. Clobber it. */
andl $~(X86_EFLAGS_IOPL | X86_EFLAGS_IF), (%_ASM_SP)
#ifdef __i386__
- mov 5*4(%esp), %eax
- shl $12, %eax
- or %eax, (%esp)
+ mov (5+2)*4(%esp), %ecx
+ shl $12, %ecx
+ or %ecx, (%esp)
#else
shl $12, %esi
or %esi, (%rsp)
#endif
push $__USER_CS /* CS */
- push $1f /* EIP */
+ push $exec_user_stub /* EIP */
#ifdef __x86_64__
push $0
#endif
jmp HYPERCALL_iret /* Drop to user privilege. */
-1:
-#ifdef __i386__
- call *4(%esp) /* fn() */
-#else
- call *%rdi
-#endif
- int $0x20 /* Return to kernel privilege. */
+1: /* entry_ret_to_kernel() returns here with a sensible stack. */
+ pop %_ASM_BP
ret
ENDFUNC(exec_user_with_iopl)
bool test_wants_user_mappings = true;
/**
- * Execute @p fn at user privilege on the current stack, folding @p iopl into
- * the iret frame.
+ * Execute @p fn at user privilege, folding @p iopl into the iret frame.
*/
void exec_user_with_iopl(void (*fn)(void), unsigned int iopl);
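And a matching sketch for the IOPL variant (PV-only, given the HYPERCALL_iret path); the probe function and port are illustrative:

    static void probe_port(void)
    {
        (void)inb(0x80); /* #GP at CPL3 unless IOPL permits it. */
    }

    /* Run the probe at user privilege with IOPL 3: */
    exec_user_with_iopl(probe_port, 3);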