#define CLGI .byte 0x0F,0x01,0xDD
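(CLGI, AMD SVM's "clear global interrupt flag", is emitted as its raw encoding 0F 01 DD, presumably so the file still assembles with toolchains that lack the SVM mnemonics.)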
ENTRY(svm_asm_do_resume)
- GET_CURRENT(%rbx)
+ GET_CURRENT(bx)
.Lsvm_do_resume:
call svm_intr_assist
mov %rsp,%rdi
VMRUN
- GET_CURRENT(%rax)
+ GET_CURRENT(ax)
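Throughout these hunks the macro argument changes from a full 64-bit register name (%rbx, %rax, %rdx, %rdi) to its bare suffix (bx, ax, dx, di): the reworked macros in the asm_defns.h hunk below token-paste the suffix onto %e and %r (%e##reg, %r##reg), so a single parameter can name both the 32-bit and the 64-bit view of the same register.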
push %rdi
push %rsi
push %rdx
push %r10
push %r11
push %rbx
- GET_CURRENT(%rbx)
+ GET_CURRENT(bx)
push %rbp
push %r12
push %r13
BUG /* vmx_vmentry_failure() shouldn't return. */
ENTRY(vmx_asm_do_vmentry)
- GET_CURRENT(%rbx)
+ GET_CURRENT(bx)
jmp .Lvmx_do_vmentry
.Lvmx_goto_emulator:
UNLIKELY_END(msi_check)
movl UREGS_rax(%rsp),%eax
- GET_CURRENT(%rbx)
+ GET_CURRENT(bx)
cmpl $NR_hypercalls,%eax
jae compat_bad_hypercall
/* This mustn't modify registers other than %rax. */
ENTRY(cr4_pv32_restore)
push %rdx
- GET_CPUINFO_FIELD(cr4, %rdx)
+ GET_CPUINFO_FIELD(cr4, dx)
mov (%rdx), %rax
test $X86_CR4_SMEP|X86_CR4_SMAP,%eax
jnz 0f
pushq %rcx
pushq $0
SAVE_VOLATILE TRAP_syscall
- GET_CURRENT(%rbx)
+ GET_CURRENT(bx)
movq VCPU_domain(%rbx),%rcx
cmpb $0,DOMAIN_is_32bit_pv(%rcx)
je switch_to_kernel
pushq %rcx
pushq $0
SAVE_VOLATILE TRAP_syscall
- GET_CURRENT(%rbx)
+ GET_CURRENT(bx)
testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
jz switch_to_kernel
pushq $0 /* null rip */
pushq $0
SAVE_VOLATILE TRAP_syscall
- GET_CURRENT(%rbx)
+ GET_CURRENT(bx)
cmpb $0,VCPU_sysenter_disables_events(%rbx)
movq VCPU_sysenter_addr(%rbx),%rax
setne %cl
call check_for_unexpected_msi
UNLIKELY_END(msi_check)
- GET_CURRENT(%rbx)
+ GET_CURRENT(bx)
/* Check that the callback is non-null. */
leaq VCPU_int80_bounce(%rbx),%rdx
call show_page_walk
ENTRY(dom_crash_sync_extable)
# Get out of the guest-save area of the stack.
- GET_STACK_BASE(%rax)
+ GET_STACK_END(ax)
leaq STACK_CPUINFO_FIELD(guest_cpu_user_regs)(%rax),%rsp
# create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
- __GET_CURRENT(%rax)
+ __GET_CURRENT(ax)
movq VCPU_domain(%rax),%rax
testb $1,DOMAIN_is_32bit_pv(%rax)
setz %al
/* No special register assumptions. */
ENTRY(ret_from_intr)
- GET_CURRENT(%rbx)
+ GET_CURRENT(bx)
testb $3,UREGS_cs(%rsp)
jz restore_all_xen
movq VCPU_domain(%rbx),%rax
GLOBAL(handle_exception)
SAVE_ALL CLAC
handle_exception_saved:
- GET_CURRENT(%rbx)
+ GET_CURRENT(bx)
testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
jz exception_with_ints_disabled
testb $3,UREGS_cs(%rsp)
jz 1f
/* Interrupted guest context. Copy the context to stack bottom. */
- GET_CPUINFO_FIELD(guest_cpu_user_regs,%rdi)
+ GET_CPUINFO_FIELD(guest_cpu_user_regs,di)
movq %rsp,%rsi
movl $UREGS_kernel_sizeof/8,%ecx
movq %rdi,%rsp
/* We want to get straight to the IRET on the NMI exit path. */
testb $3,UREGS_cs(%rsp)
jz restore_all_xen
- GET_CURRENT(%rbx)
+ GET_CURRENT(bx)
/* Send an IPI to ourselves to cover for the lack of event checking. */
movl VCPU_processor(%rbx),%eax
shll $IRQSTAT_shift,%eax
UNLIKELY_DONE(mp, tag); \
__UNLIKELY_END(tag)
-#define STACK_CPUINFO_FIELD(field) (STACK_SIZE-CPUINFO_sizeof+CPUINFO_##field)
-#define GET_STACK_BASE(reg) \
- movq $~(STACK_SIZE-1),reg; \
- andq %rsp,reg
+#define STACK_CPUINFO_FIELD(field) (1 - CPUINFO_sizeof + CPUINFO_##field)
+#define GET_STACK_END(reg) \
+ movl $STACK_SIZE-1, %e##reg; \
+ orq %rsp, %r##reg
#define GET_CPUINFO_FIELD(field, reg) \
- GET_STACK_BASE(reg); \
- addq $STACK_CPUINFO_FIELD(field),reg
+ GET_STACK_END(reg); \
+ addq $STACK_CPUINFO_FIELD(field), %r##reg
#define __GET_CURRENT(reg) \
- movq STACK_CPUINFO_FIELD(current_vcpu)(reg),reg
+ movq STACK_CPUINFO_FIELD(current_vcpu)(%r##reg), %r##reg
#define GET_CURRENT(reg) \
- GET_STACK_BASE(reg); \
+ GET_STACK_END(reg); \
__GET_CURRENT(reg)
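The old macros masked %rsp down to the stack base and reached struct cpu_info with a large positive offset (STACK_SIZE - CPUINFO_sizeof + field); the new ones OR %rsp up to the stack's last byte, after which the same fields sit at small negative offsets (1 - CPUINFO_sizeof + field). Both forms name the same addresses. A minimal standalone C check of that equivalence, with placeholder values for STACK_SIZE and CPUINFO_sizeof (illustrative assumptions, not the patch's actual values):

    /* Standalone sketch, not part of the patch: verify that the old
     * base-relative and the new end-relative computations of the
     * cpu_info address agree for any pointer within the stack. */
    #include <assert.h>
    #include <stdint.h>

    #define STACK_SIZE     (8UL << 12) /* assumed power-of-two stack size */
    #define CPUINFO_sizeof 0x100UL     /* placeholder struct size */

    int main(void)
    {
        uintptr_t rsp = 0xffff830012345678UL; /* arbitrary in-stack value */

        /* Old: mask down to the base, then step to the top of the stack. */
        uintptr_t base_rel = (rsp & ~(STACK_SIZE - 1))
                             + STACK_SIZE - CPUINFO_sizeof;

        /* New: OR up to the last byte, then apply the small negative
         * offset, exactly as GET_STACK_END + STACK_CPUINFO_FIELD do. */
        uintptr_t end_rel = (rsp | (STACK_SIZE - 1)) + 1 - CPUINFO_sizeof;

        assert(base_rel == end_rel);
        return 0;
    }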
register unsigned long sp asm("rsp");
- return (struct cpu_info *)((sp & ~(STACK_SIZE-1)) + STACK_SIZE) - 1;
+ return (struct cpu_info *)((sp | (STACK_SIZE - 1)) + 1) - 1;
}
#define get_current() (get_cpu_info()->current_vcpu)
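With the patch applied, GET_CURRENT(bx) expands to movl $STACK_SIZE-1,%ebx; orq %rsp,%rbx; movq STACK_CPUINFO_FIELD(current_vcpu)(%rbx),%rbx, and the C-side get_cpu_info() above performs the same end-relative arithmetic, so get_current() remains a single load at a small negative displacement from the stack end.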