ia64/xen-unstable: xen/include/public/arch-x86_64.h @ 12938:b58670602d35

/******************************************************************************
 * arch-x86_64.h
 *
 * Guest OS interface to x86 64-bit Xen.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (c) 2004, K A Fraser
 */

#ifndef __XEN_PUBLIC_ARCH_X86_64_H__
#define __XEN_PUBLIC_ARCH_X86_64_H__

/*
 * Hypercall interface:
 *  Input:  %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5)
 *  Output: %rax
 * Access is via hypercall page (set up by guest loader or via a Xen MSR):
 *  call hypercall_page + hypercall-number * 32
 * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi)
 */
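
/*
 * Illustrative sketch (not part of this interface): a two-argument
 * hypercall issued through the hypercall page with GNU C inline assembly.
 * The "hypercall_page" symbol and the wrapper name are assumptions; a real
 * guest provides its own entry stubs.
 *
 *   extern char hypercall_page[];
 *
 *   static inline long _hypercall2(unsigned int op,
 *                                  unsigned long a1, unsigned long a2)
 *   {
 *       long res;
 *       register unsigned long _a1 asm("rdi") = a1;
 *       register unsigned long _a2 asm("rsi") = a2;
 *       asm volatile ( "call *%[entry]"
 *                      : "=a" (res), "+r" (_a1), "+r" (_a2)
 *                      : [entry] "r" (hypercall_page + op * 32)
 *                      : "memory" );
 *       return res;
 *   }
 */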

#if __XEN_INTERFACE_VERSION__ < 0x00030203
/*
 * Legacy hypercall interface:
 * As above, except the entry sequence to the hypervisor is:
 *  mov $hypercall-number*32,%eax ; syscall
 * Clobbered: %rcx, %r11, argument registers (as above)
 */
#define TRAP_INSTR "syscall"
#endif

/* Structural guest handles introduced in 0x00030201. */
#if __XEN_INTERFACE_VERSION__ >= 0x00030201
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
    typedef struct { type *p; } __guest_handle_ ## name
#else
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
    typedef type * __guest_handle_ ## name
#endif

#define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
#define XEN_GUEST_HANDLE(name)          __guest_handle_ ## name
#define set_xen_guest_handle(hnd, val)  do { (hnd).p = val; } while (0)
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
#endif

#ifndef __ASSEMBLY__
/* Guest handles for primitive C types. */
__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
__DEFINE_XEN_GUEST_HANDLE(uint,  unsigned int);
__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
DEFINE_XEN_GUEST_HANDLE(char);
DEFINE_XEN_GUEST_HANDLE(int);
DEFINE_XEN_GUEST_HANDLE(long);
DEFINE_XEN_GUEST_HANDLE(void);

typedef unsigned long xen_pfn_t;
DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#endif
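
/*
 * Illustrative sketch (not part of this interface): how a guest points a
 * handle at one of its own buffers before passing a (hypothetical)
 * argument structure to a hypercall. Assumes the structural handle layout,
 * i.e. __XEN_INTERFACE_VERSION__ >= 0x00030201.
 *
 *   struct example_op {                  // hypothetical structure
 *       XEN_GUEST_HANDLE(ulong) buffer;  // expands to { unsigned long *p; }
 *       unsigned int nr_entries;
 *   };
 *
 *   unsigned long results[8];
 *   struct example_op op;
 *   set_xen_guest_handle(op.buffer, results);   // op.buffer.p = results
 *   op.nr_entries = 8;
 */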

/*
 * SEGMENT DESCRIPTOR TABLES
 */
/*
 * A number of GDT entries are reserved by Xen. These are not situated at the
 * start of the GDT because some stupid OSes export hard-coded selector values
 * in their ABI. These hard-coded values are always near the start of the GDT,
 * so Xen places itself out of the way, at the far end of the GDT.
 */
#define FIRST_RESERVED_GDT_PAGE  14
#define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096)
#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
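
/*
 * Worked out: 14 pages * 4096 bytes = 57344 bytes into the GDT, which at
 * 8 bytes per descriptor slot is entry 7168. A full GDT holds 8192 such
 * slots (16 pages), so the Xen-reserved range covers the final two pages,
 * entries 7168-8191.
 */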

/*
 * 64-bit segment selectors
 * These flat segments are in the Xen-private section of every GDT. Since these
 * are also present in the initial GDT, many OSes will be able to avoid
 * installing their own GDT.
 */

#define FLAT_RING3_CS32 0xe023  /* GDT index 7172 */
#define FLAT_RING3_CS64 0xe033  /* GDT index 7174 */
#define FLAT_RING3_DS32 0xe02b  /* GDT index 7173 */
#define FLAT_RING3_DS64 0x0000  /* NULL selector  */
#define FLAT_RING3_SS32 0xe02b  /* GDT index 7173 */
#define FLAT_RING3_SS64 0xe02b  /* GDT index 7173 */

#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
#define FLAT_KERNEL_DS   FLAT_KERNEL_DS64
#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
#define FLAT_KERNEL_CS   FLAT_KERNEL_CS64
#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
#define FLAT_KERNEL_SS   FLAT_KERNEL_SS64

#define FLAT_USER_DS64 FLAT_RING3_DS64
#define FLAT_USER_DS32 FLAT_RING3_DS32
#define FLAT_USER_DS   FLAT_USER_DS64
#define FLAT_USER_CS64 FLAT_RING3_CS64
#define FLAT_USER_CS32 FLAT_RING3_CS32
#define FLAT_USER_CS   FLAT_USER_CS64
#define FLAT_USER_SS64 FLAT_RING3_SS64
#define FLAT_USER_SS32 FLAT_RING3_SS32
#define FLAT_USER_SS   FLAT_USER_SS64
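
/*
 * Decoding one of these as an example: FLAT_RING3_CS64 == 0xe033 encodes
 * RPL == 3 and TI == 0 (GDT) in its low three bits, and GDT index
 * 0xe033 >> 3 == 7174, i.e. FIRST_RESERVED_GDT_ENTRY + 6, inside the
 * Xen-reserved range above.
 */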

#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
#define __HYPERVISOR_VIRT_END   0xFFFF880000000000
#define __MACH2PHYS_VIRT_START  0xFFFF800000000000
#define __MACH2PHYS_VIRT_END    0xFFFF804000000000

#ifndef HYPERVISOR_VIRT_START
#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
#define HYPERVISOR_VIRT_END   mk_unsigned_long(__HYPERVISOR_VIRT_END)
#endif

#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
#define MACH2PHYS_VIRT_END   mk_unsigned_long(__MACH2PHYS_VIRT_END)
#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
#ifndef machine_to_phys_mapping
#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
#endif
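
/*
 * Illustrative sketch (not part of this interface): translating a machine
 * frame number back to a guest pseudo-physical frame number by indexing
 * the read-only machine-to-physical table that Xen maps at
 * HYPERVISOR_VIRT_START.
 *
 *   unsigned long mfn_to_pfn_example(unsigned long mfn)
 *   {
 *       if ( mfn >= MACH2PHYS_NR_ENTRIES )
 *           return ~0UL;                      // outside the table
 *       return machine_to_phys_mapping[mfn];
 *   }
 */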

/* Maximum number of virtual CPUs in multi-processor guests. */
#define MAX_VIRT_CPUS 32

#ifndef __ASSEMBLY__

typedef unsigned long xen_ulong_t;

/*
 * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
 *  @which == SEGBASE_*  ;  @base == 64-bit base address
 * Returns 0 on success.
 */
#define SEGBASE_FS          0
#define SEGBASE_GS_USER     1
#define SEGBASE_GS_KERNEL   2
#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
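
/*
 * Illustrative sketch (not part of this interface): a guest with a
 * hypothetical HYPERVISOR_set_segment_base() wrapper could switch its
 * kernel %gs base to a per-CPU data area with:
 *
 *   HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL,
 *                               (unsigned long)percpu_area); // hypothetical pointer
 *
 * and check for a zero return on success.
 */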

/*
 * int HYPERVISOR_iret(void)
 * All arguments are on the kernel stack, in the following format.
 * Never returns if successful. Current kernel context is lost.
 * The saved CS is mapped as follows:
 *   RING0 -> RING3 kernel mode.
 *   RING1 -> RING3 kernel mode.
 *   RING2 -> RING3 kernel mode.
 *   RING3 -> RING3 user mode.
 * However RING0 indicates that the guest kernel should return to itself
 * directly with
 *      orb   $3,1*8(%rsp)
 *      iretq
 * If flags contains VGCF_in_syscall:
 *   Restore RAX, RIP, RFLAGS, RSP.
 *   Discard R11, RCX, CS, SS.
 * Otherwise:
 *   Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
 * All other registers are saved on hypercall entry and restored to user.
 */
/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
#define _VGCF_in_syscall 8
#define VGCF_in_syscall  (1<<_VGCF_in_syscall)
#define VGCF_IN_SYSCALL  VGCF_in_syscall
struct iret_context {
    /* Top of stack (%rsp at point of hypercall). */
    uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
    /* Bottom of iret stack frame. */
};
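
/*
 * Laid out from the structure above, the frame HYPERVISOR_iret expects at
 * %rsp on entry is 72 bytes of uint64_t fields:
 *
 *    0(%rsp) rax     24(%rsp) flags   48(%rsp) rflags
 *    8(%rsp) r11     32(%rsp) rip     56(%rsp) rsp
 *   16(%rsp) rcx     40(%rsp) cs      64(%rsp) ss
 */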

/*
 * Send an array of these to HYPERVISOR_set_trap_table().
 * N.B. As in x86/32 mode, the privilege level specifies which modes may enter
 * a trap via a software interrupt. Since rings 1 and 2 are unavailable, we
 * allocate privilege levels as follows:
 *  Level == 0: No one may enter
 *  Level == 1: Kernel may enter
 *  Level == 2: Kernel may enter
 *  Level == 3: Everyone may enter
 */
#define TI_GET_DPL(_ti)      ((_ti)->flags & 3)
#define TI_GET_IF(_ti)       ((_ti)->flags & 4)
#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
#define TI_SET_IF(_ti,_if)   ((_ti)->flags |= ((!!(_if))<<2))
struct trap_info {
    uint8_t       vector;  /* exception vector                              */
    uint8_t       flags;   /* 0-3: privilege level; 4: clear event enable?  */
    uint16_t      cs;      /* code selector                                 */
    unsigned long address; /* code offset                                   */
};
typedef struct trap_info trap_info_t;
DEFINE_XEN_GUEST_HANDLE(trap_info_t);
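
/*
 * Illustrative sketch (not part of this interface): registering a single
 * software-interrupt handler. The vector, handler symbol and terminating
 * zeroed entry follow common PV-guest practice; the names here are
 * assumptions.
 *
 *   struct trap_info ti[2];
 *
 *   memset(ti, 0, sizeof(ti));                   // ti[1] stays zeroed (end marker)
 *   ti[0].vector  = 0x80;                        // e.g. a legacy int $0x80 entry
 *   ti[0].cs      = FLAT_KERNEL_CS;
 *   ti[0].address = (unsigned long)entry_int80;  // hypothetical handler
 *   TI_SET_DPL(&ti[0], 3);                       // user mode may raise it
 *   // ...then pass ti to HYPERVISOR_set_trap_table().
 */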

#ifdef __GNUC__
/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
#define __DECL_REG(name) union { uint64_t r ## name, e ## name; }
#else
/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
#define __DECL_REG(name) uint64_t r ## name
#endif

struct cpu_user_regs {
    uint64_t r15;
    uint64_t r14;
    uint64_t r13;
    uint64_t r12;
    __DECL_REG(bp);
    __DECL_REG(bx);
    uint64_t r11;
    uint64_t r10;
    uint64_t r9;
    uint64_t r8;
    __DECL_REG(ax);
    __DECL_REG(cx);
    __DECL_REG(dx);
    __DECL_REG(si);
    __DECL_REG(di);
    uint32_t error_code;    /* private */
    uint32_t entry_vector;  /* private */
    __DECL_REG(ip);
    uint16_t cs, _pad0[1];
    uint8_t  saved_upcall_mask;
    uint8_t  _pad1[3];
    __DECL_REG(flags);      /* rflags.IF == !saved_upcall_mask */
    __DECL_REG(sp);
    uint16_t ss, _pad2[3];
    uint16_t es, _pad3[3];
    uint16_t ds, _pad4[3];
    uint16_t fs, _pad5[3];  /* Non-zero => takes precedence over fs_base.     */
    uint16_t gs, _pad6[3];  /* Non-zero => takes precedence over gs_base_usr. */
};
typedef struct cpu_user_regs cpu_user_regs_t;
DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);

#undef __DECL_REG
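
/*
 * Illustrative note: with a GNU C compiler the anonymous unions above let
 * the same 64-bit slot be named either way. For a hypothetical
 * "struct cpu_user_regs *regs":
 *
 *   regs->rax = 0;   // 64-bit name, always available
 *   regs->eax = 0;   // same storage, gcc-only alias (still a uint64_t)
 *
 * Non-gcc consumers must use the r-prefixed names only.
 */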

typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */

/*
 * The following is all CPU context. Note that the fpu_ctxt block is filled
 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
 */
struct vcpu_guest_context {
    /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
    struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers     */
#define VGCF_I387_VALID                (1<<0)
#define VGCF_IN_KERNEL                 (1<<2)
#define _VGCF_i387_valid               0
#define VGCF_i387_valid                (1<<_VGCF_i387_valid)
#define _VGCF_in_kernel                2
#define VGCF_in_kernel                 (1<<_VGCF_in_kernel)
#define _VGCF_failsafe_disables_events 3
#define VGCF_failsafe_disables_events  (1<<_VGCF_failsafe_disables_events)
#define _VGCF_syscall_disables_events  4
#define VGCF_syscall_disables_events   (1<<_VGCF_syscall_disables_events)
    unsigned long flags;                    /* VGCF_* flags                 */
    struct cpu_user_regs user_regs;         /* User-level CPU registers     */
    struct trap_info trap_ctxt[256];        /* Virtual IDT                  */
    unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
    unsigned long ctrlreg[8];               /* CR0-CR7 (control registers)  */
    unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
    unsigned long event_callback_eip;
    unsigned long failsafe_callback_eip;
    unsigned long syscall_callback_eip;
    unsigned long vm_assist;                /* VMASST_TYPE_* bitmap         */
    /* Segment base addresses. */
    uint64_t      fs_base;
    uint64_t      gs_base_kernel;
    uint64_t      gs_base_user;
};
typedef struct vcpu_guest_context vcpu_guest_context_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);

#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
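
/*
 * Illustrative sketch (not part of this interface): the sort of minimal
 * context a guest (or the tools) might assemble when bringing up a
 * secondary vcpu, e.g. via VCPUOP_initialise. The entry point, stack and
 * page-table frame used here are assumptions.
 *
 *   struct vcpu_guest_context ctxt;
 *
 *   memset(&ctxt, 0, sizeof(ctxt));
 *   ctxt.flags         = VGCF_in_kernel;
 *   ctxt.user_regs.cs  = FLAT_KERNEL_CS;
 *   ctxt.user_regs.ss  = FLAT_KERNEL_SS;
 *   ctxt.user_regs.rip = (unsigned long)secondary_entry;  // hypothetical
 *   ctxt.user_regs.rsp = (unsigned long)secondary_stack;  // hypothetical
 *   ctxt.kernel_ss     = FLAT_KERNEL_SS;
 *   ctxt.kernel_sp     = (unsigned long)secondary_stack;  // hypothetical
 *   ctxt.ctrlreg[3]    = xen_pfn_to_cr3(pgtable_mfn);     // frame << 12; hypothetical frame
 */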

struct arch_shared_info {
    unsigned long max_pfn;                  /* max pfn that appears in table */
    /* Frame containing list of mfns containing list of mfns containing p2m. */
    xen_pfn_t     pfn_to_mfn_frame_list_list;
    unsigned long nmi_reason;
    uint64_t pad[32];
};
typedef struct arch_shared_info arch_shared_info_t;

struct arch_vcpu_info {
    unsigned long cr2;
    unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
};
typedef struct arch_vcpu_info arch_vcpu_info_t;

typedef unsigned long xen_callback_t;

#endif /* !__ASSEMBLY__ */

/*
 * Prefix forces emulation of some non-trapping instructions.
 * Currently only CPUID.
 */
#ifdef __ASSEMBLY__
#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
#define XEN_CPUID          XEN_EMULATE_PREFIX cpuid
#else
#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
#define XEN_CPUID          XEN_EMULATE_PREFIX "cpuid"
#endif
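
/*
 * Illustrative sketch (not part of this interface): issuing CPUID through
 * the forced-emulation prefix from C, so the instruction always traps to
 * Xen. Leaf 0x40000000 conventionally returns the hypervisor signature.
 *
 *   uint32_t eax, ebx, ecx, edx, leaf = 0x40000000;
 *
 *   asm volatile ( XEN_CPUID
 *                  : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
 *                  : "0" (leaf) );
 */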

#endif /* __XEN_PUBLIC_ARCH_X86_64_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */