direct-io.hg

annotate xen/include/asm-x86/x86_32/asm_defns.h @ 15417:015d9abeacfb

i386: Simplify failsafe callback handling.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Jun 21 21:36:26 2007 +0100 (2007-06-21)
parents acb7aa72fac7
children
#ifndef __X86_32_ASM_DEFNS_H__
#define __X86_32_ASM_DEFNS_H__

#include <asm/percpu.h>

#ifdef CONFIG_FRAME_POINTER
/* Indicate special exception stack frame by inverting the frame pointer. */
#define SETUP_EXCEPTION_FRAME_POINTER           \
        movl  %esp,%ebp;                        \
        notl  %ebp
#else
#define SETUP_EXCEPTION_FRAME_POINTER
#endif

#ifndef NDEBUG
#define ASSERT_INTERRUPT_STATUS(x)              \
        pushf;                                  \
        testb $X86_EFLAGS_IF>>8,1(%esp);        \
        j##x  1f;                               \
        ud2a;                                   \
1:      addl  $4,%esp;
#else
#define ASSERT_INTERRUPT_STATUS(x)
#endif

#define ASSERT_INTERRUPTS_ENABLED  ASSERT_INTERRUPT_STATUS(nz)
#define ASSERT_INTERRUPTS_DISABLED ASSERT_INTERRUPT_STATUS(z)

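/*
 * Note on ASSERT_INTERRUPT_STATUS above (illustrative sketch, not part of
 * the original file): the macro pushes EFLAGS and tests the IF bit
 * (X86_EFLAGS_IF == 0x200, hence the shifted test against the byte at
 * 1(%esp), whose bit 1 is EFLAGS bit 9).  ASSERT_INTERRUPTS_ENABLED
 * therefore expands to roughly:
 *
 *      pushf
 *      testb $2,1(%esp)        # X86_EFLAGS_IF>>8: bit 9 of pushed EFLAGS
 *      jnz   1f                # IF set -> interrupts enabled, assertion holds
 *      ud2a                    # IF clear -> raise invalid opcode, fail loudly
 * 1:   addl  $4,%esp           # discard the saved EFLAGS
 */
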
#define SAVE_ALL_GPRS                           \
        cld;                                    \
        pushl %eax;                             \
        pushl %ebp;                             \
        SETUP_EXCEPTION_FRAME_POINTER;          \
        pushl %edi;                             \
        pushl %esi;                             \
        pushl %edx;                             \
        pushl %ecx;                             \
        pushl %ebx

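/*
 * Note on SAVE_ALL_GPRS above (illustrative, not part of the original file,
 * and assuming the public struct cpu_user_regs layout with ebx as the
 * lowest-addressed field): the registers are pushed in reverse field order,
 * so once the last push completes %esp points at a frame whose UREGS_*
 * offsets line up with that structure:
 *
 *      UREGS_ebx(%esp)  saved %ebx      (pushed last, lowest address)
 *      ...
 *      UREGS_eax(%esp)  saved %eax      (pushed first)
 *      UREGS_eip/cs/eflags(%esp)        (frame pushed by the CPU on entry)
 */
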
/*
 * Saves all register state into an exception/interrupt stack frame.
 * Returns to the caller at <xen_lbl> if the interrupted context is within
 * Xen; at <vm86_lbl> if the interrupted context is vm86; or falls through
 * if the interrupted context is an ordinary guest protected-mode context.
 * In all cases %ecx contains __HYPERVISOR_DS. %ds/%es are guaranteed to
 * contain __HYPERVISOR_DS unless control passes to <xen_lbl>, in which case
 * the caller is responsible for validity of %ds/%es.
 */
#define SAVE_ALL(xen_lbl, vm86_lbl)             \
        SAVE_ALL_GPRS;                          \
        testl $(X86_EFLAGS_VM),UREGS_eflags(%esp); \
        mov   %ds,%edi;                         \
        mov   %es,%esi;                         \
        mov   $(__HYPERVISOR_DS),%ecx;          \
        jnz   86f;                              \
        .text 1;                                \
        86: call setup_vm86_frame;              \
        jmp   vm86_lbl;                         \
        .previous;                              \
        testb $3,UREGS_cs(%esp);                \
        jz    xen_lbl;                          \
        /*                                      \
         * We are the outermost Xen context, but our    \
         * life is complicated by NMIs and MCEs. These  \
         * could occur in our critical section and      \
         * pollute %ds and %es. We have to detect that  \
         * this has occurred and avoid saving Xen DS/ES \
         * values to the guest stack frame.             \
         */                                     \
        cmpw  %cx,%di;                          \
        mov   %ecx,%ds;                         \
        mov   %fs,UREGS_fs(%esp);               \
        cmove UREGS_ds(%esp),%edi;              \
        cmpw  %cx,%si;                          \
        mov   %edi,UREGS_ds(%esp);              \
        cmove UREGS_es(%esp),%esi;              \
        mov   %ecx,%es;                         \
        mov   %gs,UREGS_gs(%esp);               \
        mov   %esi,UREGS_es(%esp)

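/*
 * Usage sketch for SAVE_ALL (illustrative, not part of the original file):
 * a caller that does not care which kind of context was interrupted can
 * pass the same forward label for both targets, as the interrupt stubs
 * later in this header do:
 *
 *      SAVE_ALL(1f,1f)
 * 1:   movl  %esp,%eax         # %esp is the struct cpu_user_regs pointer
 *      pushl %eax
 *      call  do_handler        # hypothetical C handler taking the regs
 *      addl  $4,%esp
 *      jmp   ret_from_intr
 */
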
#ifdef PERF_COUNTERS
#define PERFC_INCR(_name,_idx,_cur)             \
        pushl _cur;                             \
        movl VCPU_processor(_cur),_cur;         \
        shll $PERCPU_SHIFT,_cur;                \
        incl per_cpu__perfcounters+_name*4(_cur,_idx,4);\
        popl _cur
#else
#define PERFC_INCR(_name,_idx,_cur)
#endif

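/*
 * Note on PERFC_INCR above (illustrative, not part of the original file):
 * _cur initially holds the current vcpu pointer and is clobbered and
 * restored around the increment.  Assuming the usual per-CPU layout (one
 * copy of the per-CPU data every 1 << PERCPU_SHIFT bytes), the effective
 * address of the incl works out to
 *
 *      per_cpu__perfcounters                    ; base of the counter arrays
 *        + (VCPU_processor << PERCPU_SHIFT)     ; this CPU's per-CPU copy
 *        + _name*4 + _idx*4                     ; counter _name[_idx]
 *
 * i.e. a single increment of the selected counter on the vcpu's CPU.
 */
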
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
#define FIXUP_RING0_GUEST_STACK                 \
        testl $2,8(%esp);                       \
        jnz 1f; /* rings 2 & 3 permitted */     \
        testl $1,8(%esp);                       \
        jz 2f;                                  \
        ud2; /* ring 1 should not be used */    \
        2:cmpl $(__HYPERVISOR_VIRT_START),%esp; \
        jge 1f;                                 \
        call fixup_ring0_guest_stack;           \
        1:
#else
#define FIXUP_RING0_GUEST_STACK
#endif

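/*
 * Note on FIXUP_RING0_GUEST_STACK above (illustrative, not part of the
 * original file): by the time it runs the stub has pushed the vector word,
 * so the stack is assumed to be
 *
 *      0(%esp)  vector<<16      (pushed by the interrupt stub)
 *      4(%esp)  saved EIP       (pushed by the CPU)
 *      8(%esp)  saved CS        (pushed by the CPU)
 *     12(%esp)  saved EFLAGS    (pushed by the CPU)
 *
 * so the tests on 8(%esp) inspect the RPL bits of the interrupted CS:
 * rings 2/3 need no fixup, ring 1 is treated as a bug, and for ring 0 (a
 * supervisor-mode-kernel guest) fixup_ring0_guest_stack is called unless
 * %esp already points at or above __HYPERVISOR_VIRT_START.
 */
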
#define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
#define XBUILD_SMP_INTERRUPT(x,v)               \
asmlinkage void x(void);                        \
__asm__(                                        \
    "\n"__ALIGN_STR"\n"                         \
    ".globl " STR(x) "\n\t"                     \
    STR(x) ":\n\t"                              \
    "pushl $"#v"<<16\n\t"                       \
    STR(FIXUP_RING0_GUEST_STACK)                \
    STR(SAVE_ALL(1f,1f)) "\n\t"                 \
    "1:movl %esp,%eax\n\t"                      \
    "pushl %eax\n\t"                            \
    "call "STR(smp_##x)"\n\t"                   \
    "addl $4,%esp\n\t"                          \
    "jmp ret_from_intr\n");

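/*
 * Usage sketch for BUILD_SMP_INTERRUPT (illustrative, not part of the
 * original file; the identifiers below are assumptions, not taken from this
 * header): a caller could emit an IPI entry point with, say,
 *
 *      BUILD_SMP_INTERRUPT(event_check_interrupt, EVENT_CHECK_VECTOR)
 *
 * which declares event_check_interrupt(), pushes EVENT_CHECK_VECTOR<<16 as
 * the entry-vector word, saves the full register frame, and calls
 * smp_event_check_interrupt(regs) before returning via ret_from_intr.
 */
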
#define BUILD_COMMON_IRQ()                      \
__asm__(                                        \
    "\n" __ALIGN_STR"\n"                        \
    "common_interrupt:\n\t"                     \
    STR(FIXUP_RING0_GUEST_STACK)                \
    STR(SAVE_ALL(1f,1f)) "\n\t"                 \
    "1:movl %esp,%eax\n\t"                      \
    "pushl %eax\n\t"                            \
    "call " STR(do_IRQ) "\n\t"                  \
    "addl $4,%esp\n\t"                          \
    "jmp ret_from_intr\n");

#define IRQ_NAME2(nr) nr##_interrupt(void)
#define IRQ_NAME(nr)  IRQ_NAME2(IRQ##nr)

#define BUILD_IRQ(nr)                           \
asmlinkage void IRQ_NAME(nr);                   \
__asm__(                                        \
    "\n"__ALIGN_STR"\n"                         \
    STR(IRQ) #nr "_interrupt:\n\t"              \
    "pushl $"#nr"<<16\n\t"                      \
    "jmp common_interrupt");

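/*
 * Usage sketch for BUILD_IRQ (illustrative, not part of the original file):
 * BUILD_IRQ(0x20), for example, declares IRQ0x20_interrupt(void) and emits
 * a stub that pushes $0x20<<16 (placing the vector in the upper 16 bits of
 * the error-code slot of the frame) and jumps to common_interrupt, the
 * shared tail generated by BUILD_COMMON_IRQ() above, which calls
 * do_IRQ(regs) and returns via ret_from_intr.
 */
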
#endif /* __X86_32_ASM_DEFNS_H__ */