for both 32-bit apps on 64-bit kernels and on 32-bit kernels.
Also remove the needless re-enabling of events on x86-64's 64-bit
syscall path as well as its 32-bit int80 path (the latter accompanied
by telling Xen not to disable them in the first place).
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
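
For reference, a minimal sketch of the registration pattern this patch switches
to on x86-64 (it condenses the syscall32_cpu_init() hunk below): rather than
writing MSR_CSTAR and the MSR_IA32_SYSENTER_* registers directly, the PV guest
asks the hypervisor to vector compat-mode system calls to its entry stubs. The
helper name is made up for illustration; the real error handling and the int80
fallback for pre-3.2 hypervisors are in the actual hunks.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/linkage.h>
#include <xen/interface/callback.h>

extern asmlinkage void ia32_cstar_target(void);
extern asmlinkage void ia32_sysenter_target(void);

static void register_compat_syscall_callbacks(void)
{
	/* SYSCALL from a 32-bit app (AMD CPUs) is vectored via syscall32. */
	static struct callback_register cstar = {
		.type    = CALLBACKTYPE_syscall32,
		.address = (unsigned long)ia32_cstar_target,
	};
	/* SYSENTER from a 32-bit app (Intel CPUs) is vectored via sysenter. */
	static struct callback_register sysenter = {
		.type    = CALLBACKTYPE_sysenter,
		.address = (unsigned long)ia32_sysenter_target,
	};

	/* Replaces the former wrmsrl(MSR_CSTAR, ...) and
	 * checking_wrmsrl(MSR_IA32_SYSENTER_*, ...): in a PV guest the
	 * hypervisor, not the kernel, owns these MSRs. */
	if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) < 0 ||
	    HYPERVISOR_callback_op(CALLBACKOP_register, &cstar) < 0) {
		/* Hypervisor older than 3.2: neither callback type exists;
		 * the 32-bit vDSO then falls back to the int $0x80 page. */
	}
}
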
#endif /* !CONFIG_XEN */
CFI_ENDPROC
+ # pv sysenter call handler stub
+ENTRY(sysenter_entry_pv)
+ RING0_INT_FRAME
+ movl $__USER_DS,16(%esp)
+ movl %ebp,12(%esp)
+ movl $__USER_CS,4(%esp)
+ addl $4,%esp
+ /* +5*4 is SS:ESP,EFLAGS,CS:EIP. +8 is esp0 setting. */
+ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
+/*
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
+ cmpl $__PAGE_OFFSET-3,%ebp
+ jae syscall_fault
+1: movl (%ebp),%ebp
+.section __ex_table,"a"
+ .align 4
+ .long 1b,syscall_fault
+.previous
+ /* fall through */
+ CFI_ENDPROC
+ENDPROC(sysenter_entry_pv)
# system call handler stub
ENTRY(system_call)
void enable_sep_cpu(void)
{
-#ifndef CONFIG_X86_NO_TSS
+#ifndef CONFIG_XEN
int cpu = get_cpu();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
- put_cpu();
+#else
+ extern asmlinkage void sysenter_entry_pv(void);
+ static struct callback_register sysenter = {
+ .type = CALLBACKTYPE_sysenter,
+ .address = { __KERNEL_CS, (unsigned long)sysenter_entry_pv },
+ };
+
+ if (!boot_cpu_has(X86_FEATURE_SEP))
+ return;
+
+ get_cpu();
+
+ if (xen_feature(XENFEAT_supervisor_mode_kernel))
+ sysenter.address.eip = (unsigned long)sysenter_entry;
+
+ switch (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter)) {
+ case 0:
+ break;
+#if CONFIG_XEN_COMPAT < 0x030200
+ case -ENOSYS:
+ sysenter.type = CALLBACKTYPE_sysenter_deprecated;
+ if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) == 0)
+ break;
#endif
+ default:
+ clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
+ break;
+ }
+#endif
+ put_cpu();
}
/*
{
syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
-#ifdef CONFIG_XEN
- if (boot_cpu_has(X86_FEATURE_SEP)) {
- static struct callback_register __initdata sysenter = {
- .type = CALLBACKTYPE_sysenter,
- .address = { __KERNEL_CS, (unsigned long)sysenter_entry },
- };
-
- if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) < 0)
- clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
- }
-#endif
-
#ifdef CONFIG_COMPAT_VDSO
__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
audit-class-$(CONFIG_AUDIT) := audit.o
obj-$(CONFIG_IA32_EMULATION) += $(audit-class-y)
+syscall32-types-y := sysenter syscall
+syscall32-types-$(subst 1,$(CONFIG_XEN),$(shell expr $(CONFIG_XEN_COMPAT)0 '<' 0x0302000)) += int80
+
$(obj)/syscall32_syscall.o: \
- $(foreach F,sysenter syscall,$(obj)/vsyscall-$F.so)
+ $(foreach F,$(syscall32-types-y),$(obj)/vsyscall-$F.so)
# Teach kbuild about targets
-targets := $(foreach F,sysenter syscall,vsyscall-$F.o vsyscall-$F.so)
+targets := $(foreach F,$(syscall32-types-y),vsyscall-$F.o vsyscall-$F.so)
# The DSO images are built using a special linker script
quiet_cmd_syscall = SYSCALL $@
-Wl,-soname=linux-gate.so.1 -o $@ \
-Wl,-T,$(filter-out FORCE,$^)
-$(obj)/vsyscall-int80.so \
-$(obj)/vsyscall-sysenter.so $(obj)/vsyscall-syscall.so: \
+$(foreach F,$(syscall32-types-y),$(obj)/vsyscall-$F.so): \
$(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
$(call if_changed,syscall)
AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32 -Iarch/i386/kernel
AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32 -Iarch/i386/kernel
-
-ifdef CONFIG_XEN
AFLAGS_vsyscall-int80.o = -m32 -Wa,-32 -Iarch/i386/kernel
-CFLAGS_syscall32.o += -DUSE_INT80
-AFLAGS_syscall32_syscall.o += -DUSE_INT80
-
-$(obj)/syscall32_syscall.o: $(obj)/vsyscall-int80.so
-endif
* %ebp user stack
* 0(%ebp) Arg6
*
- * Interrupts off.
+ * Interrupts on.
*
* This is purely a fast path. For anything complicated we use the int 0x80
* path below. Set up a complete hardware stack frame to share code
*/
ENTRY(ia32_sysenter_target)
CFI_STARTPROC32 simple
- CFI_DEF_CFA rsp,0
- CFI_REGISTER rsp,rbp
- __swapgs
- movq %gs:pda_kernelstack, %rsp
- addq $(PDA_STACKOFFSET),%rsp
- /*
- * No need to follow this irqs on/off section: the syscall
- * disabled irqs, here we enable it straight after entry:
- */
- XEN_UNBLOCK_EVENTS(%r11)
- __sti
+ CFI_DEF_CFA rsp,SS+8-RIP+16
+ /*CFI_REL_OFFSET ss,SS-RIP+16*/
+ CFI_REL_OFFSET rsp,RSP-RIP+16
+ /*CFI_REL_OFFSET rflags,EFLAGS-RIP+16*/
+ /*CFI_REL_OFFSET cs,CS-RIP+16*/
+ CFI_REL_OFFSET rip,RIP-RIP+16
+ CFI_REL_OFFSET r11,8
+ CFI_REL_OFFSET rcx,0
+ movq 8(%rsp),%r11
+ CFI_RESTORE r11
+ popq %rcx
+ CFI_ADJUST_CFA_OFFSET -8
+ CFI_RESTORE rcx
movl %ebp,%ebp /* zero extension */
- pushq $__USER32_DS
- CFI_ADJUST_CFA_OFFSET 8
- /*CFI_REL_OFFSET ss,0*/
- pushq %rbp
- CFI_ADJUST_CFA_OFFSET 8
- CFI_REL_OFFSET rsp,0
- pushfq
- CFI_ADJUST_CFA_OFFSET 8
- /*CFI_REL_OFFSET rflags,0*/
- movl $VSYSCALL32_SYSEXIT, %r10d
- CFI_REGISTER rip,r10
- pushq $__USER32_CS
- CFI_ADJUST_CFA_OFFSET 8
- /*CFI_REL_OFFSET cs,0*/
- movl %eax, %eax
- pushq %r10
- CFI_ADJUST_CFA_OFFSET 8
- CFI_REL_OFFSET rip,0
- pushq %rax
- CFI_ADJUST_CFA_OFFSET 8
+ movl %eax,%eax
+ movl $__USER32_DS,40(%rsp)
+ movq %rbp,32(%rsp)
+ movl $__USER32_CS,16(%rsp)
+ movl $VSYSCALL32_SYSEXIT,8(%rsp)
+ movq %rax,(%rsp)
cld
SAVE_ARGS 0,0,0
/* no need to do an access_ok check here because rbp has been
GET_THREAD_INFO(%r10)
orl $TS_COMPAT,threadinfo_status(%r10)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
- CFI_REMEMBER_STATE
jnz sysenter_tracesys
sysenter_do_call:
cmpl $(IA32_NR_syscalls-1),%eax
IA32_ARG_FIXUP 1
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
- GET_THREAD_INFO(%r10)
- XEN_BLOCK_EVENTS(%r11)
- __cli
- TRACE_IRQS_OFF
- testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
- jnz int_ret_from_sys_call
- andl $~TS_COMPAT,threadinfo_status(%r10)
- /* clear IF, that popfq doesn't enable interrupts early */
- andl $~0x200,EFLAGS-R11(%rsp)
- RESTORE_ARGS 1,24,1,1,1,1
- popfq
- CFI_ADJUST_CFA_OFFSET -8
- /*CFI_RESTORE rflags*/
- popq %rcx /* User %esp */
- CFI_ADJUST_CFA_OFFSET -8
- CFI_REGISTER rsp,rcx
- movl $VSYSCALL32_SYSEXIT,%edx /* User %eip */
- CFI_REGISTER rip,rdx
- TRACE_IRQS_ON
- __swapgs
- XEN_UNBLOCK_EVENTS(%r11)
- __sti /* sti only takes effect after the next instruction */
- /* sysexit */
- .byte 0xf, 0x35 /* TBD */
+ jmp int_ret_from_sys_call
sysenter_tracesys:
- CFI_RESTORE_STATE
SAVE_REST
CLEAR_RREGS
movq $-ENOSYS,RAX(%rsp) /* really needed? */
* %esp user stack
* 0(%esp) Arg6
*
- * Interrupts off.
+ * Interrupts on.
*
* This is purely a fast path. For anything complicated we use the int 0x80
* path below. Set up a complete hardware stack frame to share code
*/
ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
- CFI_DEF_CFA rsp,PDA_STACKOFFSET
- CFI_REGISTER rip,rcx
- /*CFI_REGISTER rflags,r11*/
- __swapgs
- movl %esp,%r8d
- CFI_REGISTER rsp,r8
- movq %gs:pda_kernelstack,%rsp
- /*
- * No need to follow this irqs on/off section: the syscall
- * disabled irqs and here we enable it straight after entry:
- */
- XEN_UNBLOCK_EVENTS(%r11)
- __sti
- SAVE_ARGS 8,1,1
+ CFI_DEF_CFA rsp,SS+8-RIP+16
+ /*CFI_REL_OFFSET ss,SS-RIP+16*/
+ CFI_REL_OFFSET rsp,RSP-RIP+16
+ /*CFI_REL_OFFSET rflags,EFLAGS-RIP+16*/
+ /*CFI_REL_OFFSET cs,CS-RIP+16*/
+ CFI_REL_OFFSET rip,RIP-RIP+16
movl %eax,%eax /* zero extension */
+ movl RSP-RIP+16(%rsp),%r8d
+ SAVE_ARGS -8,1,1
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
- movq %rcx,RIP-ARGOFFSET(%rsp)
- CFI_REL_OFFSET rip,RIP-ARGOFFSET
movq %rbp,RCX-ARGOFFSET(%rsp) /* this lies slightly to ptrace */
movl %ebp,%ecx
- movq $__USER32_CS,CS-ARGOFFSET(%rsp)
- movq $__USER32_DS,SS-ARGOFFSET(%rsp)
- movq %r11,EFLAGS-ARGOFFSET(%rsp)
- /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
- movq %r8,RSP-ARGOFFSET(%rsp)
- CFI_REL_OFFSET rsp,RSP-ARGOFFSET
+ movl $__USER32_CS,CS-ARGOFFSET(%rsp)
+ movl $__USER32_DS,SS-ARGOFFSET(%rsp)
/* no need to do an access_ok check here because r8 has been
32bit zero extended */
/* hardware stack frame is complete now */
GET_THREAD_INFO(%r10)
orl $TS_COMPAT,threadinfo_status(%r10)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
- CFI_REMEMBER_STATE
jnz cstar_tracesys
cstar_do_call:
cmpl $IA32_NR_syscalls-1,%eax
IA32_ARG_FIXUP 1
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
- GET_THREAD_INFO(%r10)
- XEN_BLOCK_EVENTS(%r11)
- __cli
- TRACE_IRQS_OFF
- testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
- jnz int_ret_from_sys_call
- andl $~TS_COMPAT,threadinfo_status(%r10)
- RESTORE_ARGS 1,-ARG_SKIP,1,1,1
- movl RIP-ARGOFFSET(%rsp),%ecx
- CFI_REGISTER rip,rcx
- movl EFLAGS-ARGOFFSET(%rsp),%r11d
- /*CFI_REGISTER rflags,r11*/
- TRACE_IRQS_ON
- movl RSP-ARGOFFSET(%rsp),%esp
- CFI_RESTORE rsp
- __swapgs
- sysretl /* TBD */
+ jmp int_ret_from_sys_call
cstar_tracesys:
- CFI_RESTORE_STATE
SAVE_REST
CLEAR_RREGS
movq $-ENOSYS,RAX(%rsp) /* really needed? */
* Arguments are zero extended. For system calls that want sign extension and
* take long arguments a wrapper is needed. Most calls can just be called
* directly.
- * Assumes it is only called from user space and entered with interrupts off.
+ * Assumes it is only called from user space and entered with interrupts on.
*/
ENTRY(ia32_syscall)
CFI_STARTPROC simple
- CFI_DEF_CFA rsp,SS+8-RIP
- /*CFI_REL_OFFSET ss,SS-RIP*/
- CFI_REL_OFFSET rsp,RSP-RIP
- /*CFI_REL_OFFSET rflags,EFLAGS-RIP*/
- /*CFI_REL_OFFSET cs,CS-RIP*/
- CFI_REL_OFFSET rip,RIP-RIP
- __swapgs
- /*
- * No need to follow this irqs on/off section: the syscall
- * disabled irqs and here we enable it straight after entry:
- */
- XEN_UNBLOCK_EVENTS(%r11)
- __sti
- movq (%rsp),%rcx
+ CFI_DEF_CFA rsp,SS+8-RIP+16
+ /*CFI_REL_OFFSET ss,SS-RIP+16*/
+ CFI_REL_OFFSET rsp,RSP-RIP+16
+ /*CFI_REL_OFFSET rflags,EFLAGS-RIP+16*/
+ /*CFI_REL_OFFSET cs,CS-RIP+16*/
+ CFI_REL_OFFSET rip,RIP-RIP+16
+ CFI_REL_OFFSET r11,8
+ CFI_REL_OFFSET rcx,0
movq 8(%rsp),%r11
- addq $0x10,%rsp /* skip rcx and r11 */
+ CFI_RESTORE r11
+ popq %rcx
+ CFI_ADJUST_CFA_OFFSET -8
+ CFI_RESTORE rcx
movl %eax,%eax
- pushq %rax
- CFI_ADJUST_CFA_OFFSET 8
+ movq %rax,(%rsp)
cld
-/* 1: jmp 1b */
/* note the registers are not zero extended to the sf.
this could be a problem. */
SAVE_ARGS 0,0,1
#include <asm/proto.h>
#include <asm/tlbflush.h>
#include <asm/ia32_unistd.h>
+#include <xen/interface/callback.h>
-#ifdef USE_INT80
-extern unsigned char syscall32_int80[], syscall32_int80_end[];
-#endif
extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
extern int sysctl_vsyscall32;
char *syscall32_page;
-#ifndef USE_INT80
static int use_sysenter = -1;
+
+#if CONFIG_XEN_COMPAT < 0x030200
+extern unsigned char syscall32_int80[], syscall32_int80_end[];
+static int use_int80 = 1;
#endif
static struct page *
if (!syscall32_page)
panic("Cannot allocate syscall32 page");
-#ifdef USE_INT80
- /*
- * At this point we use int 0x80.
- */
- memcpy(syscall32_page, syscall32_int80,
- syscall32_int80_end - syscall32_int80);
-#else
+#if CONFIG_XEN_COMPAT < 0x030200
+ if (use_int80) {
+ memcpy(syscall32_page, syscall32_int80,
+ syscall32_int80_end - syscall32_int80);
+ } else
+#endif
if (use_sysenter > 0) {
memcpy(syscall32_page, syscall32_sysenter,
syscall32_sysenter_end - syscall32_sysenter);
memcpy(syscall32_page, syscall32_syscall,
syscall32_syscall_end - syscall32_syscall);
}
-#endif
return 0;
}
/* May not be __init: called during resume */
void syscall32_cpu_init(void)
{
-#ifndef USE_INT80
- if (use_sysenter < 0)
- use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
+ static struct callback_register cstar = {
+ .type = CALLBACKTYPE_syscall32,
+ .address = (unsigned long)ia32_cstar_target
+ };
+ static struct callback_register sysenter = {
+ .type = CALLBACKTYPE_sysenter,
+ .address = (unsigned long)ia32_sysenter_target
+ };
/* Load these always in case some future AMD CPU supports
SYSENTER from compat mode too. */
- checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
- checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
- checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
-
- wrmsrl(MSR_CSTAR, ia32_cstar_target);
+ if ((HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) < 0) ||
+ (HYPERVISOR_callback_op(CALLBACKOP_register, &cstar) < 0))
+#if CONFIG_XEN_COMPAT < 0x030200
+ return;
+ use_int80 = 0;
+#else
+ BUG();
#endif
+
+ if (use_sysenter < 0)
+ use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
}
.section ".init.data","aw"
-#ifdef USE_INT80
+#if CONFIG_XEN_COMPAT < 0x030200
.globl syscall32_int80
.globl syscall32_int80_end
* r11 eflags for syscall/sysret, temporary for C
* r12-r15,rbp,rbx saved by C code, not touched.
*
- * Interrupts are off on entry.
+ * Interrupts are enabled on entry.
* Only called from user space.
*
* XXX if we had a free scratch register we could save the RSP into the stack frame
_frame (RIP-0x10)
SAVE_ARGS -8,0
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
- /*
- * No need to follow this irqs off/on section - it's straight
- * and short:
- */
- XEN_UNBLOCK_EVENTS(%r11)
GET_THREAD_INFO(%rcx)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
CFI_REMEMBER_STATE
#endif
{ 19, 0|4, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
#ifdef CONFIG_IA32_EMULATION
- { IA32_SYSCALL_VECTOR, 3|4, __KERNEL_CS, (unsigned long)ia32_syscall},
+ { IA32_SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)ia32_syscall},
#endif
{ 0, 0, 0, 0 }
};
CONFIG_XEN_SYSFS=y
CONFIG_XEN_COMPAT_030002_AND_LATER=y
# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
+# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
CONFIG_XEN_COMPAT=0x030002
CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
CONFIG_XEN_SYSFS=y
CONFIG_XEN_COMPAT_030002_AND_LATER=y
# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
+# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
CONFIG_XEN_COMPAT=0x030002
CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
CONFIG_XEN_SYSFS=y
CONFIG_XEN_COMPAT_030002_AND_LATER=y
# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
+# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
CONFIG_XEN_COMPAT=0x030002
CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
CONFIG_XEN_SYSFS=y
CONFIG_XEN_COMPAT_030002_AND_LATER=y
# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
+# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
CONFIG_XEN_COMPAT=0x030002
CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
CONFIG_XEN_SYSFS=y
CONFIG_XEN_COMPAT_030002_AND_LATER=y
# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
+# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
CONFIG_XEN_COMPAT=0x030002
CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
CONFIG_XEN_SYSFS=y
CONFIG_XEN_COMPAT_030002_AND_LATER=y
# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
+# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
CONFIG_XEN_COMPAT=0x030002
CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
CONFIG_XEN_SYSFS=y
CONFIG_XEN_COMPAT_030002_AND_LATER=y
# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
+# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
CONFIG_XEN_COMPAT=0x030002
CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
CONFIG_XEN_SYSFS=y
CONFIG_XEN_COMPAT_030002_AND_LATER=y
# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
+# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
CONFIG_XEN_COMPAT=0x030002
CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
CONFIG_XEN_SYSFS=y
CONFIG_XEN_COMPAT_030002_AND_LATER=y
# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
+# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
CONFIG_XEN_COMPAT=0x030002
CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
if XEN
config XEN_INTERFACE_VERSION
hex
- default 0x00030206
+ default 0x00030207
menu "XEN"
config XEN_COMPAT_030004_AND_LATER
bool "3.0.4 and later"
+ config XEN_COMPAT_030100_AND_LATER
+ bool "3.1.0 and later"
+
config XEN_COMPAT_LATEST_ONLY
bool "no compatibility code"
config XEN_COMPAT
hex
default 0xffffff if XEN_COMPAT_LATEST_ONLY
+ default 0x030100 if XEN_COMPAT_030100_AND_LATER
default 0x030004 if XEN_COMPAT_030004_AND_LATER
default 0x030002 if XEN_COMPAT_030002_AND_LATER
default 0
void cpu_bringup(void)
{
cpu_init();
+ identify_cpu(cpu_data + smp_processor_id());
touch_softlockup_watchdog();
preempt_disable();
local_irq_enable();
* @extra_args == Operation-specific extra arguments (NULL if none).
*/
+/* ia64, x86: Callback for event delivery. */
#define CALLBACKTYPE_event 0
+
+/* x86: Failsafe callback when guest state cannot be restored by Xen. */
#define CALLBACKTYPE_failsafe 1
-#define CALLBACKTYPE_syscall 2 /* x86_64 only */
+
+/* x86/64 hypervisor: Syscall by 64-bit guest app ('64-on-64-on-64'). */
+#define CALLBACKTYPE_syscall 2
+
/*
- * sysenter is only available on x86_32 with the
- * supervisor_mode_kernel option enabled.
+ * x86/32 hypervisor: Only available on x86/32 when supervisor_mode_kernel
+ * feature is enabled. Do not use this callback type in new code.
*/
-#define CALLBACKTYPE_sysenter 3
+#define CALLBACKTYPE_sysenter_deprecated 3
+
+/* x86: Callback for NMI delivery. */
#define CALLBACKTYPE_nmi 4
+/*
+ * x86: sysenter is only available as follows:
+ * - 32-bit hypervisor: with the supervisor_mode_kernel feature enabled
+ * - 64-bit hypervisor: 32-bit guest applications on Intel CPUs
+ * ('32-on-32-on-64', '32-on-64-on-64')
+ * [nb. also 64-bit guest applications on Intel CPUs
+ * ('64-on-64-on-64'), but syscall is preferred]
+ */
+#define CALLBACKTYPE_sysenter 5
+
+/*
+ * x86/64 hypervisor: Syscall by 32-bit guest app on AMD CPUs
+ * ('32-on-32-on-64', '32-on-64-on-64')
+ */
+#define CALLBACKTYPE_syscall32 7
+
/*
* Disable event deliver during callback? This flag is ignored for event and
* NMI callbacks: event delivery is unconditionally disabled.
typedef struct callback_unregister callback_unregister_t;
DEFINE_XEN_GUEST_HANDLE(callback_unregister_t);
+#if __XEN_INTERFACE_VERSION__ < 0x00030207
+#undef CALLBACKTYPE_sysenter
+#define CALLBACKTYPE_sysenter CALLBACKTYPE_sysenter_deprecated
+#endif
+
#endif /* __XEN_PUBLIC_CALLBACK_H__ */
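
The versioned remapping just above keeps existing guests source-compatible:
built with __XEN_INTERFACE_VERSION__ below 0x00030207, CALLBACKTYPE_sysenter
still resolves to the old x86/32 supervisor_mode_kernel value, while newer
guests get the new cross-arch type and probe for the old one at run time. A
hedged sketch of that run-time probe (the same fallback enable_sep_cpu()
performs above; the helper name is made up):

/* Illustrative sketch only -- not part of the patch.  Mirrors the
 * CONFIG_XEN_COMPAT fallback in enable_sep_cpu(). */
#include <linux/errno.h>
#include <xen/interface/callback.h>

static int register_sysenter_callback(struct callback_register *cb)
{
	int rc = HYPERVISOR_callback_op(CALLBACKOP_register, cb);

#if CONFIG_XEN_COMPAT < 0x030200
	if (rc == -ENOSYS) {
		/* Pre-3.2 hypervisor: only the deprecated type is known. */
		cb->type = CALLBACKTYPE_sysenter_deprecated;
		rc = HYPERVISOR_callback_op(CALLBACKOP_register, cb);
	}
#endif
	return rc;	/* caller clears X86_FEATURE_SEP on failure */
}
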
/*
#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
#define __XEN_PUBLIC_XEN_COMPAT_H__
-#define __XEN_LATEST_INTERFACE_VERSION__ 0x00030206
+#define __XEN_LATEST_INTERFACE_VERSION__ 0x00030207
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* Xen is built with matching headers and implements the latest interface. */