ia64/xen-unstable

view linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/desc.h @ 6294:1a0723cd37f1

Fix many uses of machine addresses in XenLinux. Primarily
this fixes users of virt_to_machine/machine_to_virt to
use virt_to_mfn/mfn_to_virt where that is more appropriate.

This should be a big step to improved PAE stability.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Aug 19 16:06:43 2005 +0000 (2005-08-19)
parents 56a63f9f378f
children f51fe43c5d1c 5f4724c13040 3a8f27c6d56c
line source
1 /* Written 2000 by Andi Kleen */
2 #ifndef __ARCH_DESC_H
3 #define __ARCH_DESC_H
5 #include <linux/threads.h>
6 #include <asm/ldt.h>
8 #ifndef __ASSEMBLY__
10 #include <linux/string.h>
11 #include <asm/segment.h>
12 #include <asm/mmu.h>
/*
 * 8-byte code/data segment descriptor as it lives in the GDT/LDT.
 * Base and limit are split across several fields per the x86
 * descriptor layout; the rest are the usual attribute bits.
 */
struct desc_struct {
	u16 limit0;	/* limit bits 0..15 */
	u16 base0;	/* base bits 0..15 */
	unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
	unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
} __attribute__((packed));
/* Raw view of an 8-byte descriptor: two untyped 32-bit words. */
struct n_desc_struct {
	unsigned int a, b;
};
/* x86 gate descriptor type codes. */
enum {
	GATE_INTERRUPT = 0xE,	/* interrupt gate: clears IF on entry */
	GATE_TRAP = 0xF,	/* trap gate: leaves IF unchanged */
	GATE_CALL = 0xC,	/* call gate */
};
/* 16-byte long-mode gate descriptor (IDT entry). */
struct gate_struct {
	u16 offset_low;		/* handler offset bits 0..15 */
	u16 segment;		/* code segment selector */
	unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
	u16 offset_middle;	/* handler offset bits 16..31 */
	u32 offset_high;	/* handler offset bits 32..63 */
	u32 zero1;		/* reserved, must be zero */
} __attribute__((packed));
/* Split a 64-bit address into the chunks descriptor/gate fields want. */
#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)		/* bits 0..15 */
#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)	/* bits 16..31 */
#define PTR_HIGH(x) ((unsigned long)(x) >> 32)			/* bits 32..63 */
/* System descriptor type codes used for GDT TSS/LDT slots. */
enum {
	DESC_TSS = 0x9,	/* available 64-bit TSS */
	DESC_LDT = 0x2,
};
/* LDT or TSS descriptor in the GDT. 16 bytes (long-mode system descriptor). */
struct ldttss_desc {
	u16 limit0;	/* limit bits 0..15 */
	u16 base0;	/* base bits 0..15 */
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;	/* base bits 32..63 */
	u32 zero1;	/* reserved, must be zero */
} __attribute__((packed));
/* Operand format for lgdt/lidt: 16-bit limit plus 64-bit linear base. */
struct desc_ptr {
	unsigned short size;
	unsigned long address;
} __attribute__((packed)) ;
extern struct desc_ptr idt_descr, cpu_gdt_descr[NR_CPUS];

extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];

/* Each CPU's live GDT is found via the address stored in cpu_gdt_descr. */
#define get_cpu_gdt_table(_cpu) ((struct desc_struct *)(cpu_gdt_descr[(_cpu)].address))

/* Load the task register / LDT selector from their fixed GDT slots. */
#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
/* Install an empty LDT on the current CPU (via the Xen hypercall). */
static inline void clear_LDT(void)
{
	/*
	 * NB. The default_ldt for lcall7/27 handling is loaded on
	 * demand instead of here, since doing it eagerly would slow
	 * down every context switch for a feature nobody uses.
	 */
	(void)get_cpu();	/* disable preemption while changing the LDT */
	xen_set_ldt(0UL, 0);
	put_cpu();
}
/*
 * This is the ldt that every process will get unless we need
 * something other than this.
 */
extern struct desc_struct default_ldt[];
extern struct gate_struct idt_table[];	/* the IDT: one gate per vector */
95 static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
96 {
97 struct gate_struct s;
98 s.offset_low = PTR_LOW(func);
99 s.segment = __KERNEL_CS;
100 s.ist = ist;
101 s.p = 1;
102 s.dpl = dpl;
103 s.zero0 = 0;
104 s.zero1 = 0;
105 s.type = type;
106 s.offset_middle = PTR_MIDDLE(func);
107 s.offset_high = PTR_HIGH(func);
108 /* does not need to be atomic because it is only done once at setup time */
109 memcpy(adr, &s, 16);
110 }
112 static inline void set_intr_gate(int nr, void *func)
113 {
114 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
115 }
117 static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
118 {
119 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
120 }
122 static inline void set_system_gate(int nr, void *func)
123 {
124 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
125 }
127 static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
128 unsigned size)
129 {
130 struct ldttss_desc d;
131 memset(&d,0,sizeof(d));
132 d.limit0 = size & 0xFFFF;
133 d.base0 = PTR_LOW(tss);
134 d.base1 = PTR_MIDDLE(tss) & 0xFF;
135 d.type = type;
136 d.p = 1;
137 d.limit1 = (size >> 16) & 0xF;
138 d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
139 d.base3 = PTR_HIGH(tss);
140 memcpy(ptr, &d, 16);
141 }
143 static inline void set_tss_desc(unsigned cpu, void *addr)
144 {
145 set_tssldt_descriptor((struct ldttss_desc *)&get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS],
146 (unsigned long)addr,
147 DESC_TSS,
148 sizeof(struct tss_struct) - 1);
149 }
151 static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
152 {
153 set_tssldt_descriptor((struct ldttss_desc *)&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT],
154 (unsigned long)addr,
155 DESC_LDT, size * 8 - 1);
156 }
158 static inline void set_seg_base(unsigned cpu, int entry, void *base)
159 {
160 struct desc_struct *d = (struct desc_struct *)&get_cpu_gdt_table(cpu)[entry];
161 u32 addr = (u32)(u64)base;
162 BUG_ON((u64)base >> 32);
163 d->base0 = addr & 0xffff;
164 d->base1 = (addr >> 16) & 0xff;
165 d->base2 = (addr >> 24) & 0xff;
166 }
/*
 * Build the low (a) and high (b) 32-bit words of an LDT segment
 * descriptor from user-supplied descriptor info (base_addr, limit and
 * flag fields, presumably from modify_ldt's struct user_desc — verify
 * against callers).  The constant 0x7000 sets S=1 (code/data) and
 * DPL=3 per the x86 descriptor layout.
 */
#define LDT_entry_a(info) \
	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
/* Don't allow setting of the lm bit. It is useless anyways because
   64bit system calls require __USER_CS. */
#define LDT_entry_b(info) \
	(((info)->base_addr & 0xff000000) | \
	(((info)->base_addr & 0x00ff0000) >> 16) | \
	((info)->limit & 0xf0000) | \
	(((info)->read_exec_only ^ 1) << 9) | \
	((info)->contents << 10) | \
	(((info)->seg_not_present ^ 1) << 15) | \
	((info)->seg_32bit << 22) | \
	((info)->limit_in_pages << 23) | \
	((info)->useable << 20) | \
	/* ((info)->lm << 21) | */ \
	0x7000)
/*
 * True when the descriptor info describes an "empty" (not-present,
 * all-default-fields) entry, i.e. one that can simply be cleared.
 */
#define LDT_empty(info) (\
	(info)->base_addr == 0 && \
	(info)->limit == 0 && \
	(info)->contents == 0 && \
	(info)->read_exec_only == 1 && \
	(info)->seg_32bit == 0 && \
	(info)->limit_in_pages == 0 && \
	(info)->seg_not_present == 1 && \
	(info)->useable == 0 && \
	(info)->lm == 0)
/* load_TLS below hard-codes exactly three 8-byte TLS descriptors. */
#if TLS_SIZE != 24
# error update this code.
#endif
200 static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
201 {
202 #if 0
203 u64 *gdt = (u64 *)(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN);
204 gdt[0] = t->tls_array[0];
205 gdt[1] = t->tls_array[1];
206 gdt[2] = t->tls_array[2];
207 #endif
208 #define C(i) \
209 HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), t->tls_array[i])
211 C(0); C(1); C(2);
212 #undef C
213 }
215 /*
216 * load one particular LDT into the current CPU
217 */
218 extern inline void load_LDT_nolock (mm_context_t *pc, int cpu)
219 {
220 void *segments = pc->ldt;
221 int count = pc->size;
223 if (likely(!count))
224 segments = NULL;
226 xen_set_ldt((unsigned long)segments, count);
227 }
229 static inline void load_LDT(mm_context_t *pc)
230 {
231 int cpu = get_cpu();
232 load_LDT_nolock(pc, cpu);
233 put_cpu();
234 }
/* NOTE(review): duplicate of the idt_descr declaration earlier in this file. */
extern struct desc_ptr idt_descr;
238 #endif /* !__ASSEMBLY__ */
240 #endif