mov %esp, %ebp /* Stash %esp for entry_ret_to_kernel(). */
/* Prepare an IRET frame. */
- push $__USER_DS /* SS */
+ push exec_user_ss /* SS */
/* ESP */
push $user_stack + PAGE_SIZE
pushf /* EFLAGS */
-#if defined(CONFIG_PV) /* PV guests see the real interrupt flag. Clobber it. */
- andl $~X86_EFLAGS_IF, (%esp)
-#endif
+ /* Apply and/or masks to eflags. */
+ mov exec_user_efl_and_mask, %edx
+ and %edx, (%esp)
+ mov exec_user_efl_or_mask, %edx
+ or %edx, (%esp)
- push $__USER_CS /* CS */
+ push exec_user_cs /* CS */
push $exec_user_stub /* EIP */
env_IRET /* Drop to user privilege. */
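For reference, the pushes above build the frame that `iret` consumes. A
minimal C sketch of the 32-bit layout, lowest address first; the struct
name and the use of <stdint.h> types are illustrative only:

    #include <stdint.h>

    /* 32-bit iret frame, in memory order (the reverse of push order). */
    struct iret_frame32 {
        uint32_t eip;    /* push $exec_user_stub */
        uint32_t cs;     /* push exec_user_cs */
        uint32_t eflags; /* pushf, then adjusted by the masks */
        uint32_t esp;    /* push $user_stack + PAGE_SIZE */
        uint32_t ss;     /* push exec_user_ss */
    };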
mov %rsp, %rbp /* Stash %rsp for entry_ret_to_kernel(). */
/* Prepare an IRET frame. */
- push $__USER_DS /* SS */
+ push exec_user_ss(%rip) /* SS */
/* RSP */
push $user_stack + PAGE_SIZE
pushf /* RFLAGS */
-#if defined(CONFIG_PV) /* PV guests see the real interrupt flag. Clobber it. */
- andq $~X86_EFLAGS_IF, (%rsp)
-#endif
+ /* Apply and/or masks to eflags. */
+ mov exec_user_efl_and_mask(%rip), %rdx
+ and %rdx, (%rsp)
+ mov exec_user_efl_or_mask(%rip), %rdx
+ or %rdx, (%rsp)
- push $__USER_CS /* CS */
+ push exec_user_cs(%rip) /* CS */
push $exec_user_stub /* RIP */
env_IRETQ /* Drop to user privilege. */
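The 64-bit path applies the same adjustment to the pushed flags. In C
terms, both variants compute the following; a minimal sketch, helper name
hypothetical:

    /* Net effect of the and/or masks on the pushed flags value. */
    static inline unsigned long apply_user_efl_masks(unsigned long eflags)
    {
        return (eflags & exec_user_efl_and_mask) | exec_user_efl_or_mask;
    }

With the defaults below (an and-mask of ~X86_EFLAGS_IF for PV, ~0
otherwise, and an or-mask of 0), this reproduces the behaviour of the
deleted #ifdef CONFIG_PV blocks.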
extern xen_pvh_start_info_t *pvh_start_info;
extern shared_info_t shared_info;
+/*
+ * Parameters for fine tuning the exec_user_*() behaviour.
+ */
+extern unsigned long exec_user_cs, exec_user_ss;
+extern unsigned long exec_user_efl_and_mask;
+extern unsigned long exec_user_efl_or_mask;
+
#endif /* XTF_X86_TRAPS_H */
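A hedged usage sketch for the new knobs; exec_user_void() is the existing
XTF helper, and the wrapper name here is hypothetical:

    /* Run fn() at user privilege with IOPL 3, then restore the default. */
    static void run_at_iopl3(void (*fn)(void))
    {
        unsigned long old_or = exec_user_efl_or_mask;

        exec_user_efl_or_mask |= X86_EFLAGS_IOPL; /* IOPL field (bits 13:12) = 3. */
        exec_user_void(fn);
        exec_user_efl_or_mask = old_or;           /* The masks are global state. */
    }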
/*
#include <arch/lib.h>
#include <arch/processor.h>
+/*
+ * Parameters for fine tuning the exec_user_*() behaviour. PV guests see the
+ * real interrupt flag, so mask it by default.
+ */
+unsigned long exec_user_cs = __USER_CS;
+unsigned long exec_user_ss = __USER_DS;
+unsigned long exec_user_efl_and_mask =
+ ~(IS_DEFINED(CONFIG_PV) ? X86_EFLAGS_IF : 0);
+unsigned long exec_user_efl_or_mask;
+
bool (*xtf_unhandled_exception_hook)(struct cpu_regs *regs);
/*
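Worked defaults for the masks above, as a sanity check (X86_EFLAGS_IF is
0x200):

    /*
     * PV:  and_mask == ~0x200ul, or_mask == 0.
     *      e.g. a pushf value of 0x202 becomes (0x202 & ~0x200) | 0 == 0x002,
     *      i.e. IF is clobbered before the iret frame is consumed.
     * HVM: and_mask == ~0ul, or_mask == 0; flags pass through unchanged.
     */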
VARY-CFG := hypercall vmassist
-obj-perenv += main.o asm.o
+obj-perenv += main.o
include $(ROOT)/build/gen.mk
+++ /dev/null
-#include <arch/processor.h>
-#include <arch/page.h>
-#include <arch/segment.h>
-#include <xtf/asm_macros.h>
-
-ENTRY(exec_user_with_iopl) /* void (*fn)(void), unsigned int iopl */
- push %_ASM_BP
-
- /* Prepare to "call" exec_user_stub(). */
-#ifdef __i386__
- mov (1+1)*4(%esp), %eax /* Pass fn() in %eax */
-#endif
- push $1f /* Fake return addr as if we'd called exec_user_stub(). */
- mov %_ASM_SP, %_ASM_BP /* Stash %esp for entry_ret_to_kernel(). */
-
- /* Prepare an IRET frame. */
- push $__USER_DS /* SS */
- push $user_stack + PAGE_SIZE
- /* ESP */
- pushf /* EFLAGS */
-
- /* PV guests see the real interrupt flag. Clobber it. */
- andl $~(X86_EFLAGS_IOPL | X86_EFLAGS_IF), (%_ASM_SP)
-#ifdef __i386__
- mov (5+2)*4(%esp), %ecx
- shl $12, %ecx
- or %ecx, (%esp)
-#else
- shl $12, %esi
- or %esi, (%rsp)
-#endif
-
- push $__USER_CS /* CS */
- push $exec_user_stub /* EIP */
-
-#ifdef __x86_64__
- push $0
-#endif
- jmp HYPERCALL_iret /* Drop to user privilege. */
-
-1: /* entry_ret_to_kernel() returns here with a sensible stack. */
- pop %_ASM_BP
- ret
-
-ENDFUNC(exec_user_with_iopl)
-
-/*
- * Local variables:
- * tab-width: 8
- * indent-tabs-mode: nil
- * End:
- */
bool test_wants_user_mappings = true;
-/**
- * Execute @p fn at user privilege, folding @p iopl into the iret frame.
- */
-void exec_user_with_iopl(void (*fn)(void), unsigned int iopl);
-
/** Stub CLI instruction with @#GP fixup. */
static void stub_cli(void)
{
/* Run insn in userspace. */
expect(seq->name, 1, t->should_fault(1, iopl));
- exec_user_with_iopl(seq->fn, iopl);
+ exec_user_void(seq->fn);
+
check();
}
}
* with the appropriate iopl set. Reuse the exec_user infrastructure to
* issue the iret, and execute nothing interesting in user context.
*/
- exec_user_with_iopl(nop, iopl);
+ exec_user_efl_or_mask = iopl << 12;
+ exec_user_void(nop);
}
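The shift folds the requested iopl into the EFLAGS.IOPL field (bits 13:12),
e.g. iopl 3 -> 0x3000, which is X86_EFLAGS_IOPL. An illustrative helper,
name hypothetical:

    static unsigned long iopl_to_eflags(unsigned int iopl)
    {
        /* EFLAGS.IOPL occupies bits 13:12. */
        return (unsigned long)iopl << 12;
    }

Or-ing this value in via exec_user_efl_or_mask replaces the per-call iopl
parameter of the deleted exec_user_with_iopl().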
static bool vmassist_should_fault(bool user, unsigned int iopl)