ia64/xen-unstable

view xen/include/asm-x86/desc.h @ 15812:86a154e1ef5d

[HVM] Shadow: don't shadow the p2m table.
For HVM vcpus with paging disabled, we used to shadow the p2m table,
and skip the p2m lookup to go from gfn to mfn. Instead, we now
provide a simple pagetable that gives a one-to-one mapping of 4GB, and
shadow that, making the translations from gfn to mfn via the p2m.
This removes the paging-disabled special-case code from the shadow
fault handler, and allows us to expand the p2m interface, since all HVM
translations now go through the same p2m lookups.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Fri Aug 31 11:06:22 2007 +0100 (2007-08-31)
parents 1315b0901dea
children 8c305873f2b8
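
The one-to-one map described above can be pictured with a short sketch. This is an illustration only, not code from this changeset: the function name build_identity_map, its bare l2 argument, and the use of 32-bit PSE 4MB superpages are all assumptions.

    #include <stdint.h>

    /* Hypothetical sketch: fill a PSE page directory so that guest frame i
     * maps straight back to guest frame i across the whole 4GB space; the
     * shadow code then translates each frame through the p2m.
     * 0x83 = Present | Read/Write | Page Size (4MB superpage). */
    static void build_identity_map(uint32_t l2[1024])
    {
        unsigned int i;

        for ( i = 0; i < 1024; i++ )
            l2[i] = (i << 22) | 0x83; /* entry i covers [i*4MB, (i+1)*4MB) */
    }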
#ifndef __ARCH_DESC_H
#define __ARCH_DESC_H

/*
 * Xen reserves a memory page of GDT entries.
 * No guest GDT entries exist beyond the Xen reserved area.
 */
#define NR_RESERVED_GDT_PAGES    1
#define NR_RESERVED_GDT_BYTES    (NR_RESERVED_GDT_PAGES * PAGE_SIZE)
#define NR_RESERVED_GDT_ENTRIES  (NR_RESERVED_GDT_BYTES / 8)

#define LAST_RESERVED_GDT_PAGE  \
    (FIRST_RESERVED_GDT_PAGE + NR_RESERVED_GDT_PAGES - 1)
#define LAST_RESERVED_GDT_BYTE  \
    (FIRST_RESERVED_GDT_BYTE + NR_RESERVED_GDT_BYTES - 1)
#define LAST_RESERVED_GDT_ENTRY \
    (FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1)
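/*
 * Worked example (illustrative, not part of the original header): with
 * the usual PAGE_SIZE of 4096 and 8-byte descriptors, the one reserved
 * page holds 4096 / 8 = 512 GDT entries, so the reserved region spans
 * FIRST_RESERVED_GDT_ENTRY .. FIRST_RESERVED_GDT_ENTRY + 511.
 */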

#define LDT_ENTRY_SIZE 8

#if defined(__x86_64__)

#define FLAT_COMPAT_RING1_CS 0xe019  /* GDT index 259 */
#define FLAT_COMPAT_RING1_DS 0xe021  /* GDT index 260 */
#define FLAT_COMPAT_RING1_SS 0xe021  /* GDT index 260 */
#define FLAT_COMPAT_RING3_CS 0xe02b  /* GDT index 261 */
#define FLAT_COMPAT_RING3_DS 0xe033  /* GDT index 262 */
#define FLAT_COMPAT_RING3_SS 0xe033  /* GDT index 262 */

#define FLAT_COMPAT_KERNEL_DS FLAT_COMPAT_RING1_DS
#define FLAT_COMPAT_KERNEL_CS FLAT_COMPAT_RING1_CS
#define FLAT_COMPAT_KERNEL_SS FLAT_COMPAT_RING1_SS
#define FLAT_COMPAT_USER_DS   FLAT_COMPAT_RING3_DS
#define FLAT_COMPAT_USER_CS   FLAT_COMPAT_RING3_CS
#define FLAT_COMPAT_USER_SS   FLAT_COMPAT_RING3_SS
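/*
 * Illustrative decode of a selector value such as FLAT_COMPAT_RING1_CS
 * (not part of the original header). A selector packs three fields:
 * bits 15:3 index a descriptor, bit 2 picks GDT (0) or LDT (1), and
 * bits 1:0 are the requested privilege level (RPL).
 */
#if 0 /* example only */
unsigned int sel   = FLAT_COMPAT_RING1_CS; /* 0xe019 */
unsigned int index = sel >> 3;             /* descriptor table index */
unsigned int ti    = (sel >> 2) & 1;       /* 0 = GDT */
unsigned int rpl   = sel & 3;              /* 1 = ring 1 */
#endif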

#define __FIRST_TSS_ENTRY (FIRST_RESERVED_GDT_ENTRY + 8)
#define __FIRST_LDT_ENTRY (__FIRST_TSS_ENTRY + 2)

#define __TSS(n) (((n)<<2) + __FIRST_TSS_ENTRY)
#define __LDT(n) (((n)<<2) + __FIRST_LDT_ENTRY)
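/*
 * Note (added for illustration): in long mode, TSS and LDT descriptors
 * are 16 bytes, i.e. two 8-byte GDT slots each, so every CPU consumes
 * four slots and __TSS(n)/__LDT(n) advance in strides of 4. For example,
 * __TSS(1) == __FIRST_TSS_ENTRY + 4.
 */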

#elif defined(__i386__)

#define FLAT_COMPAT_KERNEL_CS FLAT_KERNEL_CS
#define FLAT_COMPAT_KERNEL_DS FLAT_KERNEL_DS
#define FLAT_COMPAT_KERNEL_SS FLAT_KERNEL_SS
#define FLAT_COMPAT_USER_CS   FLAT_USER_CS
#define FLAT_COMPAT_USER_DS   FLAT_USER_DS
#define FLAT_COMPAT_USER_SS   FLAT_USER_SS

#define __DOUBLEFAULT_TSS_ENTRY FIRST_RESERVED_GDT_ENTRY

#define __FIRST_TSS_ENTRY (FIRST_RESERVED_GDT_ENTRY + 8)
#define __FIRST_LDT_ENTRY (__FIRST_TSS_ENTRY + 1)

#define __TSS(n) (((n)<<1) + __FIRST_TSS_ENTRY)
#define __LDT(n) (((n)<<1) + __FIRST_LDT_ENTRY)
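/*
 * Note (added for illustration): on i386 each TSS or LDT descriptor is
 * a single 8-byte slot, so each CPU consumes two slots and the macros
 * advance in strides of 2; __LDT(n) is always __TSS(n) + 1.
 */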

#endif

#ifndef __ASSEMBLY__

#define load_TR(n) __asm__ __volatile__ ("ltr %%ax" : : "a" (__TSS(n)<<3) )
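/*
 * Note (added for illustration): __TSS(n) is a GDT entry index, so it
 * is shifted left by 3 to form a selector (TI = 0, RPL = 0) before
 * being loaded into the task register with `ltr'.
 */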

#if defined(__x86_64__)
#define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3)
#elif defined(__i386__)
#define GUEST_KERNEL_RPL(d) ((void)(d), 1)
#endif

/* Fix up the RPL of a guest segment selector. */
#define __fixup_guest_selector(d, sel)                             \
({                                                                 \
    uint16_t _rpl = GUEST_KERNEL_RPL(d);                           \
    (sel) = (((sel) & 3) >= _rpl) ? (sel) : (((sel) & ~3) | _rpl); \
})
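/*
 * Worked example (illustrative only): for a 64-bit PV domain the guest
 * kernel RPL is 3, so a selector 0x0808 (RPL 0) is rewritten to 0x080b,
 * while 0x080b itself is left alone. `d' stands in for some domain
 * pointer.
 */
#if 0 /* example only */
uint16_t sel = 0x0808;
__fixup_guest_selector(d, sel); /* sel is now 0x080b */
#endif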

/* Stack selectors don't need fixing up if the kernel runs in ring 0. */
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
#define fixup_guest_stack_selector(d, ss) ((void)0)
#else
#define fixup_guest_stack_selector(d, ss) __fixup_guest_selector(d, ss)
#endif

/*
 * Code selectors are always fixed up. This allows the Xen exit stub to
 * detect a return to guest context even when the guest kernel runs in
 * ring 0.
 */
#define fixup_guest_code_selector(d, cs) __fixup_guest_selector(d, cs)

/*
 * We need this check because enforcing the correct guest kernel RPL is
 * insufficient if the selector is poked into an interrupt, trap or call
 * gate. The selector RPL is ignored when a gate is accessed. We must
 * therefore make sure that the selector does not reference a Xen-private
 * segment.
 *
 * Note that selectors used only by IRET do not need to be checked. If the
 * descriptor DPL differs from CS RPL then we'll #GP.
 *
 * Stack and data selectors do not need to be checked. If DS, ES, FS or GS
 * have DPL < CPL then they'll be cleared automatically. If SS RPL or DPL
 * differs from CS RPL then we'll #GP.
 */
#define guest_gate_selector_okay(d, sel)                               \
    ((((sel)>>3) < FIRST_RESERVED_GDT_ENTRY) || /* Guest seg? */       \
     ((sel) == (!is_pv_32on64_domain(d) ?                              \
                FLAT_KERNEL_CS :                /* Xen default seg? */ \
                FLAT_COMPAT_KERNEL_CS)) ||                             \
     ((sel) & 4))                               /* LDT seg? */
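/*
 * Example (added for illustration): sel = 0x000f has bit 2 set, so it
 * names an LDT entry under guest control and passes the check no matter
 * what its index is; a GDT selector is only accepted if it lies below
 * the Xen-reserved area or is exactly the flat kernel CS.
 */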

#endif /* __ASSEMBLY__ */

/* These are bitmasks for the high 32 bits of a descriptor table entry. */
#define _SEGMENT_TYPE (15<< 8)
#define _SEGMENT_WR   ( 1<< 9) /* Writeable (data) or Readable (code)
                                  segment */
#define _SEGMENT_EC   ( 1<<10) /* Expand-down or Conforming segment */
#define _SEGMENT_CODE ( 1<<11) /* Code (vs data) segment for non-system
                                  segments */
#define _SEGMENT_S    ( 1<<12) /* System descriptor (yes iff S==0) */
#define _SEGMENT_DPL  ( 3<<13) /* Descriptor Privilege Level */
#define _SEGMENT_P    ( 1<<15) /* Segment Present */
#ifdef __x86_64__
#define _SEGMENT_L    ( 1<<21) /* 64-bit segment */
#else
#define _SEGMENT_L    0
#endif
#define _SEGMENT_DB   ( 1<<22) /* 16- or 32-bit segment */
#define _SEGMENT_G    ( 1<<23) /* Granularity */
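/*
 * Illustrative use of the masks above (not part of the original header):
 * test whether the high word `b' of a descriptor names a present, DPL-3,
 * non-system code segment such as a flat user CS.
 */
#if 0 /* example only */
static inline int is_user_code_seg(u32 b)
{
    const u32 mask = _SEGMENT_P | _SEGMENT_S | _SEGMENT_CODE | _SEGMENT_DPL;
    return (b & mask) == (_SEGMENT_P | _SEGMENT_S | _SEGMENT_CODE | (3<<13));
}
#endif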

#ifndef __ASSEMBLY__

struct desc_struct {
    u32 a, b;
};

#if defined(__x86_64__)

typedef struct {
    u64 a, b;
} idt_entry_t;

#define _set_gate(gate_addr,type,dpl,addr)               \
do {                                                     \
    (gate_addr)->a =                                     \
        (((unsigned long)(addr) & 0xFFFF0000UL) << 32) | \
        ((unsigned long)(dpl) << 45) |                   \
        ((unsigned long)(type) << 40) |                  \
        ((unsigned long)(addr) & 0xFFFFUL) |             \
        ((unsigned long)__HYPERVISOR_CS64 << 16) |       \
        (1UL << 47);                                     \
    (gate_addr)->b =                                     \
        ((unsigned long)(addr) >> 32);                   \
} while (0)
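/*
 * Layout note (added for illustration): a long-mode gate is 16 bytes.
 * Word `a' packs offset bits 31:16 (at bits 63:48), the P bit at 47,
 * the DPL at bits 46:45, the type at bits 43:40, __HYPERVISOR_CS64 at
 * bits 31:16 and offset bits 15:0 at the bottom; word `b' holds offset
 * bits 63:32. A hypothetical use, with type 14 (interrupt gate) and a
 * made-up handler symbol:
 */
#if 0 /* example only */
_set_gate(&idt_table[0x20], 14, 0, &some_handler);
#endif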

#define _set_tssldt_desc(desc,addr,limit,type)          \
do {                                                    \
    (desc)[0].a =                                       \
        ((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF);  \
    (desc)[0].b =                                       \
        ((u32)(addr) & 0xFF000000U) |                   \
        ((u32)(type) << 8) | 0x8000U |                  \
        (((u32)(addr) & 0x00FF0000U) >> 16);            \
    (desc)[1].a = (u32)(((unsigned long)(addr)) >> 32); \
    (desc)[1].b = 0;                                    \
} while (0)
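/*
 * Usage sketch (illustrative only; `tss' and `n' are hypothetical):
 * installing a 64-bit TSS for CPU n, with type 9 ("available 64-bit
 * TSS") and the conventional limit of 103 bytes. The descriptor spans
 * the two GDT slots reserved per TSS above.
 */
#if 0 /* example only */
_set_tssldt_desc(&gdt_table[__TSS(n)], (unsigned long)&tss, 103, 9);
#endif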

#elif defined(__i386__)

typedef struct desc_struct idt_entry_t;

#define _set_gate(gate_addr,type,dpl,addr)                              \
do {                                                                    \
    int __d0, __d1;                                                     \
    __asm__ __volatile__ ("movw %%dx,%%ax\n\t"                          \
                          "movw %4,%%dx\n\t"                            \
                          "movl %%eax,%0\n\t"                           \
                          "movl %%edx,%1"                               \
                          : "=m" (*((long *) (gate_addr))),             \
                            "=m" (*(1+(long *) (gate_addr))),           \
                            "=&a" (__d0), "=&d" (__d1)                  \
                          : "i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
                            "3" ((char *) (addr)),                      \
                            "2" (__HYPERVISOR_CS << 16));               \
} while (0)
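/*
 * Equivalent C (added for illustration): the asm above stores two
 * 32-bit words at gate_addr:
 *   word 0 = (__HYPERVISOR_CS << 16) | ((u32)(addr) & 0xFFFF);
 *   word 1 = ((u32)(addr) & 0xFFFF0000) | 0x8000 | (dpl<<13) | (type<<8);
 * i.e. selector:offset_low, then offset_high with P, DPL and type.
 */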

#define _set_tssldt_desc(n,addr,limit,type)                    \
    __asm__ __volatile__ ("movw %w3,0(%2)\n\t"                 \
                          "movw %%ax,2(%2)\n\t"                \
                          "rorl $16,%%eax\n\t"                 \
                          "movb %%al,4(%2)\n\t"                \
                          "movb %4,5(%2)\n\t"                  \
                          "movb $0,6(%2)\n\t"                  \
                          "movb %%ah,7(%2)\n\t"                \
                          "rorl $16,%%eax"                     \
                          : "=m" (*(n))                        \
                          : "a" (addr), "r" (n), "ir" (limit), \
                            "i" (type|0x80))

#endif

extern struct desc_struct gdt_table[];
#ifdef CONFIG_COMPAT
extern struct desc_struct compat_gdt_table[];
#else
# define compat_gdt_table gdt_table
#endif

extern void set_intr_gate(unsigned int irq, void *addr);
extern void set_system_gate(unsigned int n, void *addr);
extern void set_task_gate(unsigned int n, unsigned int sel);
extern void set_tss_desc(unsigned int n, void *addr);
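/*
 * Usage sketch (illustrative only; the handler symbol is made up,
 * TRAP_page_fault being the usual vector-14 constant):
 */
#if 0 /* example only */
set_intr_gate(TRAP_page_fault, &page_fault_handler);
#endif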

#endif /* !__ASSEMBLY__ */

#endif /* __ARCH_DESC_H */