From: Andrew Cooper
Date: Fri, 13 Sep 2024 14:41:46 +0000 (+0100)
Subject: XTF: Use fastcall by default for 32bit
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=c9a5e404e70c21c7621db4b8cabdf68261db7e1c;p=people%2Fandrewcoop%2Fxen-test-framework.git

XTF: Use fastcall by default for 32bit

This passes up to 3 parameters in registers (%eax, %edx and %ecx), rather
than on the stack.

Most transformations are easy.

The exec_user() infrastructure took two parameters on the stack and used an
ad-hoc %eax/%ecx arrangement with %edx as a scratch register. Fastcall uses
%eax/%edx for the parameters, so switch the scratch register to %ecx instead.

memop-seg was already using %eax, and needs a bit of care now that addr is no
longer passed on the stack. However, it does remove the need for stack_adj,
which simplifies the result.

Signed-off-by: Andrew Cooper
---

diff --git a/arch/x86/entry_32.S b/arch/x86/entry_32.S
index b76d7ef..31c6526 100644
--- a/arch/x86/entry_32.S
+++ b/arch/x86/entry_32.S
@@ -84,9 +84,8 @@ handle_exception:
     mov %eax, %ds
     mov %eax, %es
 
-    push %esp                   /* struct cpu_regs * */
+    mov %esp, %eax              /* struct cpu_regs * */
     call do_exception
-    add $4, %esp
 
     RESTORE_ALL
 
@@ -104,17 +103,10 @@ ENTRY(entry_ret_to_kernel)   /* int $X86_VEC_RET2KERN */
     ret
 ENDFUNC(entry_ret_to_kernel)
 
-ENTRY(exec_user_param)
-    /*
-     * 2*4(%esp) ulong p1
-     * 1*4(%esp) ulong (*fn)(ulong)
-     * 0*4(%esp) return address
-     */
+ENTRY(exec_user_param)          /* %eax = ulong (*fn)(ulong p1), %edx = ulong p1 */
     push %ebp
 
     /* Prepare to "call" exec_user_stub(). */
-    mov (1+1)*4(%esp), %eax     /* Pass fn() in %eax */
-    mov (1+2)*4(%esp), %ecx     /* Pass p1 in %ecx */
     push $1f                    /* Fake return addr as if we'd called exec_user_stub(). */
     mov %esp, %ebp              /* Stash %esp for entry_ret_to_kernel(). */
 
@@ -125,10 +117,10 @@ ENTRY(exec_user_param)
     pushf                       /* EFLAGS */
 
     /* Apply and/or masks to eflags. */
-    mov exec_user_efl_and_mask, %edx
-    and %edx, (%esp)
-    mov exec_user_efl_or_mask, %edx
-    or %edx, (%esp)
+    mov exec_user_efl_and_mask, %ecx
+    and %ecx, (%esp)
+    mov exec_user_efl_or_mask, %ecx
+    or %ecx, (%esp)
 
     push exec_user_cs           /* CS */
     push $exec_user_stub        /* EIP */
@@ -142,13 +134,9 @@ ENDFUNC(exec_user_param)
 
 .pushsection .text.user, "ax", @progbits
 
-ENTRY(exec_user_stub)
-    /*
-     * For SMEP/SMAP safety, no shared stack can be used, so all
-     * parameters are passed in registers.
-     */
-    push %ecx                   /* Push p1 for fn()'s call frame. */
-    call *%eax                  /* fn(p1) */
+ENTRY(exec_user_stub)           /* %eax = ulong (*fn)(ulong p1), %edx = ulong p1 */
+    xchg %eax, %edx             /* Swap p1 to be first parameter to fn(). */
+    call *%edx                  /* fn(p1) */
 
     int $X86_VEC_RET2KERN       /* Return to kernel privilege. */
 ENDFUNC(exec_user_stub)
@@ -167,9 +155,8 @@ ENTRY(entry_EVTCHN)
     mov %eax, %ds
     mov %eax, %es
 
-    push %esp                   /* struct cpu_regs * */
+    mov %esp, %eax              /* struct cpu_regs * */
     call do_evtchn
-    add $4, %esp
 
     RESTORE_ALL
 
@@ -195,9 +182,8 @@ ENTRY(entry_SYSCALL)
     mov %eax, %ds
     mov %eax, %es
 
-    push %esp                   /* struct cpu_regs * */
+    mov %esp, %eax              /* struct cpu_regs * */
     call do_syscall
-    add $4, %esp
 
     RESTORE_ALL
 
@@ -222,9 +208,8 @@ ENTRY(entry_SYSENTER)
     mov %eax, %ds
     mov %eax, %es
 
-    push %esp                   /* struct cpu_regs * */
+    mov %esp, %eax              /* struct cpu_regs * */
     call do_sysenter
-    add $4, %esp
 
     RESTORE_ALL
 
diff --git a/arch/x86/hvm/head.S b/arch/x86/hvm/head.S
index f94dd0b..a6bb5f5 100644
--- a/arch/x86/hvm/head.S
+++ b/arch/x86/hvm/head.S
@@ -73,7 +73,7 @@ GLOBAL(_elf_start)             /* HVM common setup. */
 #ifdef __x86_64__
     lea .Lmain_err_msg(%rip), %rdi
 #else
-    push $.Lmain_err_msg
+    mov $.Lmain_err_msg, %eax
 #endif
     call panic
 ENDFUNC(_elf_start)
diff --git a/arch/x86/pv/head.S b/arch/x86/pv/head.S
index 0b2592f..36c9a05 100644
--- a/arch/x86/pv/head.S
+++ b/arch/x86/pv/head.S
@@ -35,7 +35,7 @@ GLOBAL(_elf_start)
 #ifdef __x86_64__
     lea .Lmain_err_msg(%rip), %rdi
 #else
-    push $.Lmain_err_msg
+    mov $.Lmain_err_msg, %eax
 #endif
     call panic
 ENDFUNC(_elf_start)
diff --git a/build/common.mk b/build/common.mk
index 644961c..a6e270f 100644
--- a/build/common.mk
+++ b/build/common.mk
@@ -43,7 +43,7 @@ COMMON_CFLAGS += -Wno-unused-parameter -Winline
 COMMON_AFLAGS-x86_32 := -m32
 COMMON_AFLAGS-x86_64 := -m64
 
-COMMON_CFLAGS-x86_32 := -m32
+COMMON_CFLAGS-x86_32 := -m32 -mregparm=3
 COMMON_CFLAGS-x86_64 := -m64
 
 defcfg-pv := $(ROOT)/config/default-pv.cfg.in
diff --git a/tests/memop-seg/asm.S b/tests/memop-seg/asm.S
index 08e87b2..be9fb09 100644
--- a/tests/memop-seg/asm.S
+++ b/tests/memop-seg/asm.S
@@ -12,9 +12,6 @@ ENTRY(stub_\seg\()_abs)        /* exinfo_t stub_\seg_abs(unsigned long addr) */
      * Switch segment if necessary. The old segment is preserved on the
      * stack for the duration of the test.
      */
-    .local stack_adj
-    stack_adj = 0
-
     .if \load_seg
     .ifeqs "\seg", "none"
         push %ds
@@ -25,19 +22,22 @@ ENTRY(stub_\seg\()_abs)       /* exinfo_t stub_\seg_abs(unsigned long addr) */
         push $(GDTE_AVAIL1 << 3 | 3)
         pop %\seg
     .endif
-    stack_adj = 1
     .endif
 #endif
 
-    /* No exception if we don't fault. Also reused by the 64bit case. */
-    xor %eax, %eax
-
     /* The bottom bit of 'addr' encodes FEP. */
 #ifdef __i386__
-    testb $1, (1 + stack_adj)*4(%esp)
+    testb $1, %al
 #else
     testb $1, %dil
 #endif
+
+    /*
+     * No exception if we don't fault.
+     * Reused by the 64bit case, and careful to not clobber flags.
+     */
+    mov $0, %eax
+
     jz 1f
 
     _ASM_XEN_FEP
@@ -82,9 +82,6 @@ ENTRY(stub_\seg\()_\reg)       /* exinfo_t stub_\seg_\reg(unsigned long addr) */
      * Switch segment if necessary. The old segment is preserved on the
      * stack for the duration of the test.
      */
-    .local stack_adj
-    stack_adj = 0
-
     .if \load_seg
     .ifeqs "\seg", "none"
         push %ds
@@ -95,7 +92,6 @@ ENTRY(stub_\seg\()_\reg)       /* exinfo_t stub_\seg_\reg(unsigned long addr) */
         push $(GDTE_AVAIL1 << 3 | 3)
         pop %\seg
     .endif
-    stack_adj = 1
     .endif
 #endif
 
@@ -106,7 +102,7 @@ ENTRY(stub_\seg\()_\reg)      /* exinfo_t stub_\seg_\reg(unsigned long addr) */
 
     /* Move 'addr' into \reg */
 #ifdef __i386__
-    mov (1 + stack_adj)*4(%esp), %\reg
+    mov %eax, %\reg
 #else
     mov %rdi, %\reg
 #endif
diff --git a/tests/nmi-taskswitch-priv/main.c b/tests/nmi-taskswitch-priv/main.c
index b2e43f9..299117c 100644
--- a/tests/nmi-taskswitch-priv/main.c
+++ b/tests/nmi-taskswitch-priv/main.c
@@ -54,9 +54,8 @@ asm("exit_NMI_task:"
     "push %ebx;"
     "push %ebp;"
 
-    "push %esp;"
+    "mov %esp, %eax;"
     "call do_exception;"
-    "add $1*4, %esp;"
 
     "pop %ebp;"
    "pop %ebx;"
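
For reference, a minimal sketch (not part of the patch, and the function name
below is made up) of what -mregparm=3 means at the C level, and why the
assembly stubs above now expect their arguments in registers:

/*
 * Hypothetical example: compiled with -m32 -mregparm=3, GCC passes the
 * first three integer arguments in %eax, %edx and %ecx rather than
 * pushing them on the stack, and returns the result in %eax.
 */
unsigned long regparm_demo(unsigned long a, unsigned long b, unsigned long c)
{
    /* a arrives in %eax, b in %edx, c in %ecx. */
    return a + b + c;
}

With %eax and %edx both carrying parameters into exec_user_param(), %ecx is
the only remaining call-clobbered register, which is why it takes over as the
scratch register for the eflags masking.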