Trap handling in an HVMlite domain is different from the PV one.
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
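
For readers of the diff: the underlying difference is that a PV guest masks event delivery and learns the faulting address through the shared-info page, while an HVMlite guest can use the native cli/sti instructions and read %cr2 itself. Below is a minimal C sketch of the two styles; irq_disable() and fault_address() are illustrative names only, the real definitions are in the os.h hunks further down.

#ifdef CONFIG_PARAVIRT
/* PV: hypervisor-assisted, the state lives in the shared info page. */
static inline void irq_disable(void)
{
    /* Simplified: the real Mini-OS macro also adds a compiler barrier. */
    HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].evtchn_upcall_mask = 1;
}

static inline unsigned long fault_address(void)
{
    return HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2;
}
#else
/* HVMlite: plain x86 instructions, no hypervisor assistance needed. */
static inline void irq_disable(void)
{
    asm volatile ( "cli" : : : "memory" );
}

static inline unsigned long fault_address(void)
{
    unsigned long cr2;

    asm volatile ( "mov %%cr2,%0" : "=r" (cr2) );
    return cr2;
}
#endif
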
}
printk("\n");
}
-#define read_cr2() \
- (HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2)
static int handling_pg_fault = 0;
#include <xen/arch-x86_32.h>
#ifdef CONFIG_PARAVIRT
+
+#define KERNEL_DS __KERNEL_DS
+
ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "Mini-OS")
ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic")
ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _WORD hypercall_page)
lss stack_start,%esp
#else
+#define KERNEL_DS __KERN_DS
+
#include "x86_hvm.S"
movl stack_start,%esp
pushl %edx; \
pushl %ecx; \
pushl %ebx; \
- movl $(__KERNEL_DS),%edx; \
+ movl $(KERNEL_DS),%edx; \
movl %edx,%ds; \
movl %edx,%es;
movl ORIG_EAX(%esp), %edx # get the error code
movl %eax, ORIG_EAX(%esp)
movl %ecx, ES(%esp)
- movl $(__KERNEL_DS), %ecx
+ movl $(KERNEL_DS), %ecx
movl %ecx, %ds
movl %ecx, %es
movl %esp,%eax # pt_regs pointer
addl $8,%esp
RESTORE_ALL
+#ifdef CONFIG_PARAVIRT
# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
.byte 0x28 # iret
.byte 0x00,0x00,0x00,0x00 # movb $1,1(%esi)
.byte 0x00,0x00 # jmp 11b
+
+#else
+
+ENTRY(hypervisor_callback)
+ pushl $0
+ pushl $do_hypervisor_callback
+ jmp do_exception
+
+#endif
# Hypervisor uses this for application faults while it executes.
ENTRY(failsafe_callback)
/* Macros */
.macro zeroentry sym
+#ifdef CONFIG_PARAVIRT
movq (%rsp),%rcx
movq 8(%rsp),%r11
addq $0x10,%rsp /* skip rcx and r11 */
+#endif
pushq $0 /* push error code/oldrax */
pushq %rax /* push real oldrax to the rdi slot */
leaq \sym(%rip),%rax
.endm
.macro errorentry sym
+#ifdef CONFIG_PARAVIRT
movq (%rsp),%rcx
movq 8(%rsp),%r11
addq $0x10,%rsp /* rsp points to the error code */
+#endif
pushq %rax
leaq \sym(%rip),%rax
jmp error_entry
#ifdef CONFIG_PARAVIRT
testl $NMI_MASK,2*8(%rsp)
jnz 2f
-#endif
/* Direct iret to kernel space. Correct CS and SS. */
orb $3,1*8(%rsp)
orb $3,4*8(%rsp)
+#endif
iretq
#ifdef CONFIG_PARAVIRT
jmp error_exit
+#ifdef CONFIG_PARAVIRT
/*
* Xen event (virtual interrupt) entry point.
*/
andb $KERNEL_CS_MASK,CS(%rsp) # CS might have changed
jmp 11b
+#else
+error_exit:
+ RESTORE_REST
+ RESTORE_ALL
+ HYPERVISOR_IRET 0
+
+/*
+ * Xen event (virtual interrupt) entry point.
+ */
+ENTRY(hypervisor_callback)
+ zeroentry do_hypervisor_callback
+
+#endif
ENTRY(failsafe_callback)
+#ifdef CONFIG_PARAVIRT
popq %rcx
popq %r11
+#endif
iretq
#define X86_CR4_PAE 0x00000020 /* enable physical address extensions */
#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */
+#define X86_EFLAGS_IF 0x00000200
+
#define __KERNEL_CS FLAT_KERNEL_CS
#define __KERNEL_DS FLAT_KERNEL_DS
#define __KERNEL_SS FLAT_KERNEL_SS
-
+#ifdef CONFIG_PARAVIRT
/*
* The use of 'barrier' in the following reflects their use as local-lock
barrier(); \
} while (0)
+#define irqs_disabled() \
+ HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].evtchn_upcall_mask
+
+#else
+
+#if defined(__i386__)
+#define __SZ "l"
+#define __REG "e"
+#else
+#define __SZ "q"
+#define __REG "r"
+#endif
+
+#define __cli() asm volatile ( "cli" : : : "memory" )
+#define __sti() asm volatile ( "sti" : : : "memory" )
+
+#define __save_flags(x) \
+do { \
+ unsigned long __f; \
+ asm volatile ( "pushf" __SZ " ; pop" __SZ " %0" : "=g" (__f)); \
+ x = (__f & X86_EFLAGS_IF) ? 1 : 0; \
+} while (0)
+
+#define __restore_flags(x) \
+do { \
+ if (x) __sti(); \
+ else __cli(); \
+} while (0)
+
+#define __save_and_cli(x) \
+do { \
+ __save_flags(x); \
+ __cli(); \
+} while (0)
+
+static inline int irqs_disabled(void)
+{
+ int flag;
+
+ __save_flags(flag);
+ return !flag;
+}
+
+#endif
+
#define local_irq_save(x) __save_and_cli(x)
#define local_irq_restore(x) __restore_flags(x)
#define local_save_flags(x) __save_flags(x)
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()
-#define irqs_disabled() \
- HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].evtchn_upcall_mask
-
/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")
#undef ADDR
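
Whichever branch is built, callers keep using the local_irq_* wrappers above unchanged. A hypothetical critical section (not code from this patch) for illustration:

static unsigned long counter;

static void bump_counter(void)
{
    unsigned long flags;

    local_irq_save(flags);      /* mask events (PV) or clear IF (HVMlite) */
    counter++;
    local_irq_restore(flags);   /* put the previous state back */
}
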
+#ifdef CONFIG_PARAVIRT
+static inline unsigned long read_cr2(void)
+{
+ return HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2;
+}
+#else
+static inline unsigned long read_cr2(void)
+{
+ unsigned long cr2;
+
+ asm volatile ( "mov %%cr2,%0\n\t" : "=r" (cr2) );
+ return cr2;
+}
+#endif
+
#endif /* not assembly */
#endif /* _OS_H_ */
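
Usage note: with read_cr2() provided as an inline for both variants, the page-fault handler in traps.c needs no change at the call site. A rough sketch of that path, simplified from Mini-OS do_page_fault() (the real handler also dumps the registers and the page-table walk before crashing the domain):

void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
    unsigned long addr = read_cr2();   /* PV or native, as selected above */

    if ( handling_pg_fault == 1 )
    {
        /* Nested fault: the real code shuts the domain down via a hypercall. */
        do_exit();
    }
    handling_pg_fault++;
    barrier();

    printk("Page fault at linear address %lx, regs %p, code %lx\n",
           addr, regs, error_code);
    /* ... dump state, then crash ... */
}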