ia64/xen-unstable

view xen/include/asm-x86/x86_64/asm_defns.h @ 15812:86a154e1ef5d

[HVM] Shadow: don't shadow the p2m table.
For HVM vcpus with paging disabled, we used to shadow the p2m table,
and skip the p2m lookup to go from gfn to mfn. Instead, we now
provide a simple pagetable that gives a one-to-one mapping of 4GB, and
shadow that, making the translations from gfn to mfn via the p2m.
This removes the paging-disabled special-case code from the shadow
fault handler, and allows us to expand the p2m interface, since all HVM
translations now go through the same p2m lookups.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Fri Aug 31 11:06:22 2007 +0100 (2007-08-31)
parents 608ddb14259b
children
line source
1 #ifndef __X86_64_ASM_DEFNS_H__
2 #define __X86_64_ASM_DEFNS_H__
4 #include <asm/percpu.h>
#ifdef CONFIG_FRAME_POINTER
/*
 * Indicate special exception stack frame by inverting the frame pointer.
 * The one's-complement of %rsp is stored in %rbp; a stack walker can then
 * recognise an exception frame by the implausible (inverted) value
 * (NOTE(review): recognition logic lives elsewhere — not visible here).
 */
#define SETUP_EXCEPTION_FRAME_POINTER           \
        movq  %rsp,%rbp;                        \
        notq  %rbp
#else
/* Frame pointers disabled: expands to nothing. */
#define SETUP_EXCEPTION_FRAME_POINTER
#endif
#ifndef NDEBUG
/*
 * Debug-build assertion on the CPU interrupt-enable (IF) flag.
 * 'x' is a j<cc> condition suffix (z / nz, see wrappers below).
 *
 * pushf saves RFLAGS on the stack; IF is bit 9 of RFLAGS, i.e. bit 1 of
 * the byte at offset 1 from %rsp, hence the testb of X86_EFLAGS_IF>>8
 * against 1(%rsp).  If the j##x condition does not hold, ud2a raises an
 * invalid-opcode exception, crashing noisily at the assertion site.
 * The local label 1: skips the ud2a and discards the pushed RFLAGS.
 */
#define ASSERT_INTERRUPT_STATUS(x)              \
        pushf;                                  \
        testb $X86_EFLAGS_IF>>8,1(%rsp);        \
        j##x  1f;                               \
        ud2a;                                   \
1:      addq  $8,%rsp;
#else
/* Non-debug builds: assertions compile away. */
#define ASSERT_INTERRUPT_STATUS(x)
#endif

/* Convenience wrappers: trap if interrupts are not in the stated state. */
#define ASSERT_INTERRUPTS_ENABLED  ASSERT_INTERRUPT_STATUS(nz)
#define ASSERT_INTERRUPTS_DISABLED ASSERT_INTERRUPT_STATUS(z)
/*
 * Save every general-purpose register on the stack, building the register
 * frame used by exception/interrupt entry paths.  The push order is
 * significant: it must match the layout the C handlers expect when they
 * receive a pointer to this frame in %rdi (presumably struct
 * cpu_user_regs — TODO confirm against the public headers; not visible
 * in this file).  cld clears DF so string instructions in handlers run
 * forwards, as the SysV ABI requires on C entry.
 * SETUP_EXCEPTION_FRAME_POINTER marks the frame (see above) after %rbp
 * has been pushed, so the saved copy is preserved unmodified.
 */
#define SAVE_ALL                                \
        cld;                                    \
        pushq %rdi;                             \
        pushq %rsi;                             \
        pushq %rdx;                             \
        pushq %rcx;                             \
        pushq %rax;                             \
        pushq %r8;                              \
        pushq %r9;                              \
        pushq %r10;                             \
        pushq %r11;                             \
        pushq %rbx;                             \
        pushq %rbp;                             \
        SETUP_EXCEPTION_FRAME_POINTER;          \
        pushq %r12;                             \
        pushq %r13;                             \
        pushq %r14;                             \
        pushq %r15;
/*
 * Restore the register frame built by SAVE_ALL.  Pop order is the exact
 * reverse of the push order above, so every register returns to its
 * saved value.  Any change to SAVE_ALL must be mirrored here.
 */
#define RESTORE_ALL                             \
        popq  %r15;                             \
        popq  %r14;                             \
        popq  %r13;                             \
        popq  %r12;                             \
        popq  %rbp;                             \
        popq  %rbx;                             \
        popq  %r11;                             \
        popq  %r10;                             \
        popq  %r9;                              \
        popq  %r8;                              \
        popq  %rax;                             \
        popq  %rcx;                             \
        popq  %rdx;                             \
        popq  %rsi;                             \
        popq  %rdi;
#ifdef PERF_COUNTERS
/*
 * Increment the per-CPU performance counter _name[_idx] for the vcpu
 * whose pointer is in register _cur, without clobbering any register:
 *   1. _cur is pushed so it can be reused as scratch and restored last.
 *   2. The vcpu's processor id (VCPU_processor(_cur)) replaces _cur,
 *      sign-extended to 64 bits by movslq.
 *   3. %rdx is saved, used to hold the RIP-relative base of the
 *      per_cpu__perfcounters area, added into _cur (processor id shifted
 *      by PERCPU_SHIFT = offset of that CPU's per-cpu block), then
 *      restored — so _idx must not be %rdx only during the addq window,
 *      and %rdx is back in place before the incl.
 *   4. incl bumps the 32-bit counter at _name*4(_cur,_idx,4).
 * NOTE(review): assumes _idx is a register distinct from _cur; callers
 * are expected to guarantee this — confirm at the call sites.
 */
#define PERFC_INCR(_name,_idx,_cur)             \
        pushq _cur;                             \
        movslq VCPU_processor(_cur),_cur;       \
        pushq %rdx;                             \
        leaq per_cpu__perfcounters(%rip),%rdx;  \
        shlq  $PERCPU_SHIFT,_cur;               \
        addq  %rdx,_cur;                        \
        popq  %rdx;                             \
        incl _name*4(_cur,_idx,4);              \
        popq  _cur
#else
/* Perf counters disabled: expands to nothing. */
#define PERFC_INCR(_name,_idx,_cur)
#endif
/*
 * Work around AMD erratum #88: on affected CPUs, swapgs may execute
 * speculatively ahead of older memory operations; the mfence serialises
 * memory accesses first.  Emitted as a string for use inside inline asm.
 */
#define safe_swapgs                             \
        "mfence; swapgs;"

/*
 * Separator between the rex64 prefix and the following mnemonic differs
 * per toolchain: Sun's assembler wants a backslash, GNU as uses a slash.
 */
#ifdef __sun__
#define REX64_PREFIX "rex64\\"
#else
#define REX64_PREFIX "rex64/"
#endif
/*
 * Emit an SMP interrupt entry stub named 'x' for vector 'v'.
 * The two-level macro lets 'x' and 'v' be macro arguments that are
 * themselves expanded before stringification/pasting.
 *
 * The stub:
 *   - pushes a zero quadword where the hardware error code would sit,
 *     then stores the 32-bit vector number into the upper half of that
 *     slot (4(%rsp)), matching the frame layout hardware exceptions use;
 *   - saves all registers (SAVE_ALL, stringified into the asm body);
 *   - passes the register frame pointer in %rdi, per the SysV ABI, to
 *     the C handler smp_##x;
 *   - exits through ret_from_intr (defined elsewhere — not visible here).
 */
#define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
#define XBUILD_SMP_INTERRUPT(x,v)               \
asmlinkage void x(void);                        \
__asm__(                                        \
    "\n"__ALIGN_STR"\n"                         \
    ".globl " STR(x) "\n\t"                     \
    STR(x) ":\n\t"                              \
    "pushq $0\n\t"                              \
    "movl $"#v",4(%rsp)\n\t"                    \
    STR(SAVE_ALL)                               \
    "movq %rsp,%rdi\n\t"                        \
    "callq "STR(smp_##x)"\n\t"                  \
    "jmp ret_from_intr\n");
/*
 * Emit the shared tail for all device IRQ stubs (see BUILD_IRQ below):
 * each per-IRQ stub pushes its frame and jumps here, which saves all
 * registers, hands the frame to do_IRQ in %rdi, and returns through
 * ret_from_intr.  The per-IRQ stubs are expected to have already pushed
 * the dummy error code and IRQ number before jumping in.
 */
#define BUILD_COMMON_IRQ()                      \
__asm__(                                        \
    "\n" __ALIGN_STR"\n"                        \
    "common_interrupt:\n\t"                     \
    STR(SAVE_ALL)                               \
    "movq %rsp,%rdi\n\t"                        \
    "callq " STR(do_IRQ) "\n\t"                 \
    "jmp ret_from_intr\n");
/*
 * Name helpers: IRQ_NAME(0x20) expands (via the extra indirection that
 * forces 'nr' to be macro-expanded first) to IRQ0x20_interrupt(void).
 */
#define IRQ_NAME2(nr) nr##_interrupt(void)
#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)

/*
 * Emit the tiny per-IRQ entry stub IRQ<nr>_interrupt: push a zero
 * quadword as a dummy error code, store the IRQ number in the upper
 * half of that slot (4(%rsp)) — mirroring the hardware-exception frame
 * layout — then jump to the shared common_interrupt tail emitted by
 * BUILD_COMMON_IRQ() above.
 */
#define BUILD_IRQ(nr)                           \
asmlinkage void IRQ_NAME(nr);                   \
__asm__(                                        \
    "\n"__ALIGN_STR"\n"                         \
    STR(IRQ) #nr "_interrupt:\n\t"              \
    "pushq $0\n\t"                              \
    "movl $"#nr",4(%rsp)\n\t"                   \
    "jmp common_interrupt");
125 #endif /* __X86_64_ASM_DEFNS_H__ */