ia64/xen-unstable

view xen/include/asm-x86/desc.h @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use d->max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)

#ifndef __ARCH_DESC_H
#define __ARCH_DESC_H

/*
 * Xen reserves a memory page of GDT entries.
 * No guest GDT entries exist beyond the Xen reserved area.
 */
#define NR_RESERVED_GDT_PAGES   1
#define NR_RESERVED_GDT_BYTES   (NR_RESERVED_GDT_PAGES * PAGE_SIZE)
#define NR_RESERVED_GDT_ENTRIES (NR_RESERVED_GDT_BYTES / 8)

#define LAST_RESERVED_GDT_PAGE  \
    (FIRST_RESERVED_GDT_PAGE + NR_RESERVED_GDT_PAGES - 1)
#define LAST_RESERVED_GDT_BYTE  \
    (FIRST_RESERVED_GDT_BYTE + NR_RESERVED_GDT_BYTES - 1)
#define LAST_RESERVED_GDT_ENTRY \
    (FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1)
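
A quick sanity check of the arithmetic above: with the usual 4 KiB x86 PAGE_SIZE and 8-byte descriptors, the single reserved page holds 512 GDT entries. The FIRST_RESERVED_GDT_* constants come from elsewhere in the tree (this header only derives the LAST_* values), so the standalone sketch below supplies an illustrative stand-in:

#include <stdio.h>

#define PAGE_SIZE               4096 /* usual x86 page size */
#define FIRST_RESERVED_GDT_PAGE 14   /* illustrative stand-in only */

#define NR_RESERVED_GDT_PAGES   1
#define NR_RESERVED_GDT_BYTES   (NR_RESERVED_GDT_PAGES * PAGE_SIZE)
#define NR_RESERVED_GDT_ENTRIES (NR_RESERVED_GDT_BYTES / 8)

int main(void)
{
    /* 4096 bytes / 8 bytes per descriptor = 512 reserved entries. */
    printf("reserved GDT entries: %d\n", NR_RESERVED_GDT_ENTRIES);
    printf("reserved pages: %d..%d\n", FIRST_RESERVED_GDT_PAGE,
           FIRST_RESERVED_GDT_PAGE + NR_RESERVED_GDT_PAGES - 1);
    return 0;
}
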
#define LDT_ENTRY_SIZE 8

#if defined(__x86_64__)

#define FLAT_COMPAT_RING1_CS 0xe019  /* GDT index 259 */
#define FLAT_COMPAT_RING1_DS 0xe021  /* GDT index 260 */
#define FLAT_COMPAT_RING1_SS 0xe021  /* GDT index 260 */
#define FLAT_COMPAT_RING3_CS 0xe02b  /* GDT index 261 */
#define FLAT_COMPAT_RING3_DS 0xe033  /* GDT index 262 */
#define FLAT_COMPAT_RING3_SS 0xe033  /* GDT index 262 */

#define FLAT_COMPAT_KERNEL_DS FLAT_COMPAT_RING1_DS
#define FLAT_COMPAT_KERNEL_CS FLAT_COMPAT_RING1_CS
#define FLAT_COMPAT_KERNEL_SS FLAT_COMPAT_RING1_SS
#define FLAT_COMPAT_USER_DS   FLAT_COMPAT_RING3_DS
#define FLAT_COMPAT_USER_CS   FLAT_COMPAT_RING3_CS
#define FLAT_COMPAT_USER_SS   FLAT_COMPAT_RING3_SS

#define TSS_ENTRY (FIRST_RESERVED_GDT_ENTRY + 8)
#define LDT_ENTRY (TSS_ENTRY + 2)
#define PER_CPU_GDT_ENTRY (LDT_ENTRY + 2)
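
As a reminder of the standard x86 selector encoding (architectural, not something this header defines): a selector packs (index << 3) | (table-indicator << 2) | RPL. A minimal sketch decoding two of the values above; note the indices land inside Xen's reserved area of the GDT:

#include <stdio.h>
#include <stdint.h>

static void decode(const char *name, uint16_t sel)
{
    printf("%-22s index=%-5u table=%s RPL=%u\n", name,
           sel >> 3, (sel & 4) ? "LDT" : "GDT", sel & 3);
}

int main(void)
{
    decode("FLAT_COMPAT_RING1_CS", 0xe019); /* GDT, RPL 1 */
    decode("FLAT_COMPAT_RING3_CS", 0xe02b); /* GDT, RPL 3 */
    return 0;
}
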
#elif defined(__i386__)

#define FLAT_COMPAT_KERNEL_CS FLAT_KERNEL_CS
#define FLAT_COMPAT_KERNEL_DS FLAT_KERNEL_DS
#define FLAT_COMPAT_KERNEL_SS FLAT_KERNEL_SS
#define FLAT_COMPAT_USER_CS   FLAT_USER_CS
#define FLAT_COMPAT_USER_DS   FLAT_USER_DS
#define FLAT_COMPAT_USER_SS   FLAT_USER_SS

#define DOUBLEFAULT_TSS_ENTRY FIRST_RESERVED_GDT_ENTRY

#define TSS_ENTRY (FIRST_RESERVED_GDT_ENTRY + 8)
#define LDT_ENTRY (TSS_ENTRY + 1)
#define PER_CPU_GDT_ENTRY (LDT_ENTRY + 1)

#endif

#ifndef __ASSEMBLY__

#if defined(__x86_64__)
#define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3)
#elif defined(__i386__)
#define GUEST_KERNEL_RPL(d) ((void)(d), 1)
#endif

/* Fix up the RPL of a guest segment selector. */
#define __fixup_guest_selector(d, sel)                             \
({                                                                 \
    uint16_t _rpl = GUEST_KERNEL_RPL(d);                           \
    (sel) = (((sel) & 3) >= _rpl) ? (sel) : (((sel) & ~3) | _rpl); \
})
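
A minimal standalone sketch of what the fixup does, with GUEST_KERNEL_RPL hard-wired to 3 (the 64-bit PV guest case; the real macro consults the domain). Statement expressions are a GCC extension, which the header itself also relies on:

#include <stdio.h>
#include <stdint.h>

#define GUEST_KERNEL_RPL(d) ((void)(d), 3) /* assumes a 64-bit PV guest */

#define __fixup_guest_selector(d, sel)                             \
({                                                                 \
    uint16_t _rpl = GUEST_KERNEL_RPL(d);                           \
    (sel) = (((sel) & 3) >= _rpl) ? (sel) : (((sel) & ~3) | _rpl); \
})

int main(void)
{
    uint16_t sel = 0x0810;             /* guest supplies an RPL-0 selector */
    __fixup_guest_selector(NULL, sel);
    printf("0x0810 -> 0x%04x\n", sel); /* 0x0813: RPL raised to 3 */
    return 0;
}
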
/* Stack selectors don't need fixing up if the kernel runs in ring 0. */
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
#define fixup_guest_stack_selector(d, ss) ((void)0)
#else
#define fixup_guest_stack_selector(d, ss) __fixup_guest_selector(d, ss)
#endif

/*
 * Code selectors are always fixed up. It allows the Xen exit stub to detect
 * return to guest context, even when the guest kernel runs in ring 0.
 */
#define fixup_guest_code_selector(d, cs) __fixup_guest_selector(d, cs)

/*
 * We need this macro because enforcing the correct guest kernel RPL is
 * insufficient if the selector is poked into an interrupt, trap or call gate.
 * The selector RPL is ignored when a gate is accessed. We must therefore make
 * sure that the selector does not reference a Xen-private segment.
 *
 * Note that selectors used only by IRET do not need to be checked. If the
 * descriptor DPL differs from CS RPL then we'll #GP.
 *
 * Stack and data selectors do not need to be checked. If DS, ES, FS or GS
 * have DPL < CPL then they'll be cleared automatically. If SS RPL or DPL
 * differs from CS RPL then we'll #GP.
 */
#define guest_gate_selector_okay(d, sel)                                \
    ((((sel)>>3) < FIRST_RESERVED_GDT_ENTRY) ||  /* Guest seg? */       \
     ((sel) == (!is_pv_32on64_domain(d) ?                               \
                FLAT_KERNEL_CS :                 /* Xen default seg? */ \
                FLAT_COMPAT_KERNEL_CS)) ||                              \
     ((sel) & 4))                                /* LDT seg? */
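
A standalone sketch of the three acceptance cases, using hypothetical constants (FIRST_RESERVED_GDT_ENTRY and FLAT_KERNEL_CS live in Xen's config and public headers, not here) and with the domain argument dropped for brevity:

#include <stdio.h>

#define FIRST_RESERVED_GDT_ENTRY 7168   /* hypothetical value */
#define FLAT_KERNEL_CS           0xe033 /* hypothetical value */

#define gate_selector_okay(sel)                                     \
    ((((sel) >> 3) < FIRST_RESERVED_GDT_ENTRY) || /* guest seg? */  \
     ((sel) == FLAT_KERNEL_CS) ||                 /* Xen default */ \
     ((sel) & 4))                                 /* LDT seg? */

int main(void)
{
    printf("%d\n", gate_selector_okay(0x0008)); /* 1: ordinary guest seg */
    printf("%d\n", gate_selector_okay(0xe033)); /* 1: Xen's default CS */
    printf("%d\n", gate_selector_okay(0xe008)); /* 0: Xen-private, rejected */
    return 0;
}
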
#endif /* __ASSEMBLY__ */

/* These are bitmasks for the high 32 bits of a descriptor table entry. */
#define _SEGMENT_TYPE    (15<< 8)
#define _SEGMENT_WR      ( 1<< 9) /* Writeable (data) or Readable (code)
                                     segment */
#define _SEGMENT_EC      ( 1<<10) /* Expand-down or Conforming segment */
#define _SEGMENT_CODE    ( 1<<11) /* Code (vs data) segment for non-system
                                     segments */
#define _SEGMENT_S       ( 1<<12) /* System descriptor (yes iff S==0) */
#define _SEGMENT_DPL     ( 3<<13) /* Descriptor Privilege Level */
#define _SEGMENT_P       ( 1<<15) /* Segment Present */
#ifdef __x86_64__
#define _SEGMENT_L       ( 1<<21) /* 64-bit segment */
#else
#define _SEGMENT_L       0
#endif
#define _SEGMENT_DB      ( 1<<22) /* 16- or 32-bit segment */
#define _SEGMENT_G       ( 1<<23) /* Granularity */
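
For instance, decoding the high word of the textbook flat 32-bit ring-0 code descriptor 0x00CF9A000000FFFF (a standard architectural example, not a value from this file) with the masks above:

#include <stdio.h>
#include <stdint.h>

/* Masks copied from the header above. */
#define _SEGMENT_TYPE (15<< 8)
#define _SEGMENT_S    ( 1<<12)
#define _SEGMENT_DPL  ( 3<<13)
#define _SEGMENT_P    ( 1<<15)
#define _SEGMENT_DB   ( 1<<22)
#define _SEGMENT_G    ( 1<<23)

int main(void)
{
    uint32_t hi = 0x00CF9A00; /* high word of 0x00CF9A000000FFFF */

    printf("type=%u S=%d DPL=%u P=%d DB=%d G=%d\n",
           (unsigned)((hi & _SEGMENT_TYPE) >> 8), /* 10: code, readable */
           !!(hi & _SEGMENT_S),                   /* 1: non-system */
           (unsigned)((hi & _SEGMENT_DPL) >> 13), /* 0: ring 0 */
           !!(hi & _SEGMENT_P),                   /* 1: present */
           !!(hi & _SEGMENT_DB),                  /* 1: 32-bit default */
           !!(hi & _SEGMENT_G));                  /* 1: 4 KiB granularity */
    return 0;
}
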

#ifndef __ASSEMBLY__

struct desc_struct {
    u32 a, b;
};

#if defined(__x86_64__)

typedef struct {
    u64 a, b;
} idt_entry_t;

#define _set_gate(gate_addr,type,dpl,addr)               \
do {                                                     \
    (gate_addr)->a = 0;                                  \
    wmb(); /* disable gate /then/ rewrite */             \
    (gate_addr)->b =                                     \
        ((unsigned long)(addr) >> 32);                   \
    wmb(); /* rewrite /then/ enable gate */              \
    (gate_addr)->a =                                     \
        (((unsigned long)(addr) & 0xFFFF0000UL) << 32) | \
        ((unsigned long)(dpl) << 45) |                   \
        ((unsigned long)(type) << 40) |                  \
        ((unsigned long)(addr) & 0xFFFFUL) |             \
        ((unsigned long)__HYPERVISOR_CS64 << 16) |       \
        (1UL << 47);                                     \
} while (0)
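
Here word `a` holds offset bits 0-15 and 16-31, the __HYPERVISOR_CS64 selector, the gate type, the DPL and the present bit (bit 47), while `b` holds offset bits 32-63; the two wmb()s guarantee a half-written gate is never live. A hedged sketch (not a verbatim copy of Xen's desc.c) of how the set_intr_gate() declared at the bottom of this file might use it; type 14 is the x86 interrupt-gate type:

static void example_set_intr_gate(idt_entry_t *idt, unsigned int n, void *addr)
{
    /* DPL 0: the vector cannot be raised by guest software. */
    _set_gate(&idt[n], 14, 0, addr);
}
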

#define _set_tssldt_desc(desc,addr,limit,type)           \
do {                                                     \
    (desc)[0].b = (desc)[1].b = 0;                       \
    wmb(); /* disable entry /then/ rewrite */            \
    (desc)[0].a =                                        \
        ((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF);   \
    (desc)[1].a = (u32)(((unsigned long)(addr)) >> 32);  \
    wmb(); /* rewrite /then/ enable entry */             \
    (desc)[0].b =                                        \
        ((u32)(addr) & 0xFF000000U) |                    \
        ((u32)(type) << 8) | 0x8000U |                   \
        (((u32)(addr) & 0x00FF0000U) >> 16);             \
} while (0)
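
A hedged usage sketch (not verbatim Xen code): installing a TSS descriptor. Type 9 is the architectural "available 64-bit TSS" type, and on x86-64 the descriptor spans two 8-byte GDT slots, which is why LDT_ENTRY above sits at TSS_ENTRY + 2. The tss_slot parameter is an assumption for illustration:

static void example_install_tss(struct desc_struct *tss_slot,
                                unsigned long tss_addr, unsigned int size)
{
    /* tss_slot points at the GDT entry at index TSS_ENTRY; both 8-byte
     * slots of the 16-byte descriptor are written. load_TR() (declared
     * at the bottom of this file) would then reload the task register. */
    _set_tssldt_desc(tss_slot, tss_addr, size - 1,
                     9 /* available 64-bit TSS */);
}
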

#elif defined(__i386__)

typedef struct desc_struct idt_entry_t;

#define _set_gate(gate_addr,type,dpl,addr)               \
do {                                                     \
    (gate_addr)->b = 0;                                  \
    wmb(); /* disable gate /then/ rewrite */             \
    (gate_addr)->a =                                     \
        ((unsigned long)(addr) & 0xFFFFUL) |             \
        ((unsigned long)__HYPERVISOR_CS << 16);          \
    wmb(); /* rewrite /then/ enable gate */              \
    (gate_addr)->b =                                     \
        ((unsigned long)(addr) & 0xFFFF0000UL) |         \
        ((unsigned long)(dpl) << 13) |                   \
        ((unsigned long)(type) << 8) |                   \
        (1UL << 15);                                     \
} while (0)

#define _set_tssldt_desc(desc,addr,limit,type)           \
do {                                                     \
    (desc)->b = 0;                                       \
    wmb(); /* disable entry /then/ rewrite */            \
    (desc)->a =                                          \
        ((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF);   \
    wmb(); /* rewrite /then/ enable entry */             \
    (desc)->b =                                          \
        ((u32)(addr) & 0xFF000000U) |                    \
        ((u32)(type) << 8) | 0x8000U |                   \
        (((u32)(addr) & 0x00FF0000U) >> 16);             \
} while (0)

DECLARE_PER_CPU(struct tss_struct *, doublefault_tss);

#endif

struct desc_ptr {
    unsigned short limit;
    unsigned long base;
} __attribute__((__packed__));
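
This is the operand format the lgdt/lidt/sgdt/sidt instructions expect: a 16-bit limit immediately followed by the base with no padding, hence the packed attribute. A minimal usage sketch (the function name is illustrative):

static inline void example_load_gdt(struct desc_struct *gdt,
                                    unsigned int nr_entries)
{
    struct desc_ptr gdtr = {
        .limit = nr_entries * 8 - 1, /* limit is inclusive */
        .base  = (unsigned long)gdt,
    };
    asm volatile ( "lgdt %0" : : "m" (gdtr) );
}
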
extern struct desc_struct boot_cpu_gdt_table[];
DECLARE_PER_CPU(struct desc_struct *, gdt_table);
#ifdef CONFIG_COMPAT
extern struct desc_struct boot_cpu_compat_gdt_table[];
DECLARE_PER_CPU(struct desc_struct *, compat_gdt_table);
#else
# define boot_cpu_compat_gdt_table boot_cpu_gdt_table
# define per_cpu__compat_gdt_table per_cpu__gdt_table
#endif

extern void set_intr_gate(unsigned int irq, void * addr);
extern void load_TR(void);

#endif /* !__ASSEMBLY__ */

#endif /* __ARCH_DESC_H */