ia64/xen-unstable

view xen/include/public/arch-x86_64.h @ 5567:a9eabb439f56

bitkeeper revision 1.1744.1.1 (42bb2b36zVlhMosXY3bEaiG-q9siMg)

fix tools compilation on x86_64 adding 2 fields that will be unused

Signed-off-by: Vincent Hanquez <vincent@xensource.com>
author vh249@arcadians.cl.cam.ac.uk
date Thu Jun 23 21:35:50 2005 +0000 (2005-06-23)
parents 649cd37aa1ab
children ef4c824e3720
line source
1 /******************************************************************************
2 * arch-x86_64.h
3 *
4 * Guest OS interface to x86 64-bit Xen.
5 *
6 * Copyright (c) 2004, K A Fraser
7 */
#ifndef __XEN_PUBLIC_ARCH_X86_64_H__
#define __XEN_PUBLIC_ARCH_X86_64_H__

#ifndef PACKED
/* GCC-specific way to pack structure definitions (no implicit padding). */
#define PACKED __attribute__ ((packed))
#endif

/* Pointers are naturally 64 bits in this architecture; no padding needed. */
#define _MEMORY_PADDING(_X)
#define MEMORY_PADDING

/*
 * SEGMENT DESCRIPTOR TABLES
 */
/*
 * A number of GDT entries are reserved by Xen. These are not situated at the
 * start of the GDT because some stupid OSes export hard-coded selector values
 * in their ABI. These hard-coded values are always near the start of the GDT,
 * so Xen places itself out of the way, at the far end of the GDT.
 */
#define FIRST_RESERVED_GDT_PAGE  14
#define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096) /* 57344 */
#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)    /* 7168 (8-byte descriptors) */
/*
 * 64-bit segment selectors
 * These flat segments are in the Xen-private section of every GDT. Since these
 * are also present in the initial GDT, many OSes will be able to avoid
 * installing their own GDT.
 *
 * Selector layout: bits 15:3 = GDT entry index, bit 2 = TI, bits 1:0 = RPL.
 * (The "GDT index 260/261/262" comments that used to sit here were stale:
 * 0xe023 >> 3 = 7172, i.e. FIRST_RESERVED_GDT_ENTRY + 4.)
 */
#define FLAT_RING3_CS32 0xe023  /* GDT entry 7172, RPL 3 */
#define FLAT_RING3_CS64 0xe033  /* GDT entry 7174, RPL 3 */
#define FLAT_RING3_DS32 0xe02b  /* GDT entry 7173, RPL 3 */
#define FLAT_RING3_DS64 0x0000  /* NULL selector */
#define FLAT_RING3_SS32 0xe02b  /* GDT entry 7173, RPL 3 */
#define FLAT_RING3_SS64 0xe02b  /* GDT entry 7173, RPL 3 */

/*
 * The guest kernel also runs in ring 3 on x86-64 Xen, so the "kernel" and
 * "user" names below are all aliases of the ring-3 selectors above.
 */
#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
#define FLAT_KERNEL_DS   FLAT_KERNEL_DS64
#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
#define FLAT_KERNEL_CS   FLAT_KERNEL_CS64
#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
#define FLAT_KERNEL_SS   FLAT_KERNEL_SS64

#define FLAT_USER_DS64 FLAT_RING3_DS64
#define FLAT_USER_DS32 FLAT_RING3_DS32
#define FLAT_USER_DS   FLAT_USER_DS64
#define FLAT_USER_CS64 FLAT_RING3_CS64
#define FLAT_USER_CS32 FLAT_RING3_CS32
#define FLAT_USER_CS   FLAT_USER_CS64
#define FLAT_USER_SS64 FLAT_RING3_SS64
#define FLAT_USER_SS32 FLAT_RING3_SS32
#define FLAT_USER_SS   FLAT_USER_SS64

/* And the trap vector is... (guests enter Xen via SYSCALL, not int $NN) */
#define TRAP_INSTR "syscall"

/* Virtual address range reserved for the hypervisor (512 GiB hole). */
#ifndef HYPERVISOR_VIRT_START
#define HYPERVISOR_VIRT_START (0xFFFF800000000000UL)
#define HYPERVISOR_VIRT_END   (0xFFFF880000000000UL)
#endif

/* Maximum number of virtual CPUs in multi-processor guests. */
#define MAX_VIRT_CPUS 32
#ifndef __ASSEMBLY__

/* The machine->physical mapping table starts at this address, read-only. */
#ifndef machine_to_phys_mapping
#define machine_to_phys_mapping ((u32 *)HYPERVISOR_VIRT_START)
#endif

/*
 * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
 * @which == SEGBASE_* ; @base == 64-bit base address
 * Returns 0 on success.
 *
 * Segment bases cannot be written directly from ring 3, so guests ask Xen
 * to load FS.base / GS.base (kernel and user flavours) on their behalf.
 */
#define SEGBASE_FS          0
#define SEGBASE_GS_USER     1
#define SEGBASE_GS_KERNEL   2
#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */

/*
 * int HYPERVISOR_switch_to_user(void)
 * All arguments are on the kernel stack, in the following format.
 * Never returns if successful. Current kernel context is lost.
 * If flags contains VGCF_IN_SYSCALL:
 *   Restore RAX, RIP, RFLAGS, RSP.
 *   Discard R11, RCX, CS, SS.
 * Otherwise:
 *   Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
 * All other registers are saved on hypercall entry and restored to user.
 */
/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
#define VGCF_IN_SYSCALL (1<<8)
/*
 * Stack-frame layout consumed by HYPERVISOR_switch_to_user() (described
 * above). Packed: the hypervisor reads these nine u64 slots verbatim off
 * the kernel stack, so no implicit padding is permitted.
 */
struct switch_to_user {
    /* Top of stack (%rsp at point of hypercall). */
    u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
    /* Bottom of switch_to_user stack frame. */
} PACKED;
/* NB. Both the following are 64 bits each. */
typedef unsigned long memory_t;   /* Full-sized pointer/address/memory-size. */

/*
 * Send an array of these to HYPERVISOR_set_trap_table().
 * N.B. As in x86/32 mode, the privilege level specifies which modes may enter
 * a trap via a software interrupt. Since rings 1 and 2 are unavailable, we
 * allocate privilege levels as follows:
 *  Level == 0: No one may enter
 *  Level == 1: Kernel may enter
 *  Level == 2: Kernel may enter
 *  Level == 3: Everyone may enter
 */
/* Accessors for trap_info_t.flags: DPL lives in bits 1:0, "IF" in bit 2. */
#define TI_GET_DPL(_ti)      ((_ti)->flags & 3)
#define TI_GET_IF(_ti)       ((_ti)->flags & 4)
/* NB: the setters OR bits in; they do not clear a previously-set value. */
#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
#define TI_SET_IF(_ti,_if)   ((_ti)->flags |= ((!!(_if))<<2))
/*
 * One entry of the guest's virtual IDT; an array of these is registered
 * via HYPERVISOR_set_trap_table(). The leading number in each field
 * comment is the byte offset within the (packed) 16-byte record.
 */
typedef struct {
    u8       vector;  /* 0: exception vector */
    u8       flags;   /* 1: 0-3: privilege level; 4: clear event enable? */
    u16      cs;      /* 2: code selector */
    u32      __pad;   /* 4: explicit padding keeps 'address' 8-byte aligned */
    memory_t address; /* 8: code address */
} PACKED trap_info_t; /* 16 bytes */
/*
 * Guest register file as exchanged with the hypervisor (part of
 * vcpu_guest_context below). The anonymous unions (a GCC extension)
 * give each 64-bit register a 32-bit-style alias name; note both union
 * members are declared u64, so e.g. 'eax' is a pure alias of 'rax',
 * not a 32-bit view of its low half.
 */
typedef struct cpu_user_regs {
    u64 r15;
    u64 r14;
    u64 r13;
    u64 r12;
    union { u64 rbp, ebp; };
    union { u64 rbx, ebx; };
    u64 r11;
    u64 r10;
    u64 r9;
    u64 r8;
    union { u64 rax, eax; };
    union { u64 rcx, ecx; };
    union { u64 rdx, edx; };
    union { u64 rsi, esi; };
    union { u64 rdi, edi; };
    u32 error_code;    /* private */
    u32 entry_vector;  /* private */
    union { u64 rip, eip; };
    u16 cs, _pad0[1];
    u8  saved_upcall_mask;
    u8  _pad1[3];
    union { u64 rflags, eflags; };
    union { u64 rsp, esp; };
    u16 ss, _pad2[3];
    u16 es, _pad3[3];
    u16 ds, _pad4[3];
    u16 fs, _pad5[3]; /* Non-zero => takes precedence over fs_base.      */
    u16 gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_user. */
} cpu_user_regs_t;
typedef u64 tsc_timestamp_t; /* RDTSC timestamp */

/*
 * The following is all CPU context. Note that the fpu_ctxt block is filled
 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
 */
typedef struct vcpu_guest_context {
#define VGCF_I387_VALID (1<<0)  /* fpu_ctxt holds valid FPU state */
#define VGCF_VMX_GUEST  (1<<1)
#define VGCF_IN_KERNEL  (1<<2)
    unsigned long flags;                    /* VGCF_* flags                 */
    cpu_user_regs_t user_regs;              /* User-level CPU registers     */
    struct { char x[512]; } fpu_ctxt        /* User-level FPU registers     */
    __attribute__((__aligned__(16)));       /* (needs 16-byte alignment)    */
    trap_info_t trap_ctxt[256];             /* Virtual IDT                  */
    unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
    unsigned long pt_base;                  /* CR3 (pagetable base)         */
    unsigned long cr0;                      /* CR0                          */
    unsigned long cr4;                      /* CR4                          */
    unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
    unsigned long event_callback_eip;       /* event-delivery entry point   */
    unsigned long failsafe_callback_eip;    /* fallback on failed callback  */
    unsigned long syscall_callback_eip;     /* NOTE(review): presumably the
                                               SYSCALL entry point — confirm
                                               against hypervisor source    */
    unsigned long vm_assist;                /* VMASST_TYPE_* bitmap         */
    /* Segment base addresses. */
    u64 fs_base;
    u64 gs_base_kernel;
    u64 gs_base_user;
} vcpu_guest_context_t;
/* x86-64-specific portion of the shared_info page. */
typedef struct {
    /* MFN of a table of MFNs that make up p2m table */
    u64 pfn_to_mfn_frame_list;
} arch_shared_info_t;
/*
 * x86-64-specific per-VCPU state: intentionally empty for now (placeholder
 * so common code can embed it). An empty struct is a GCC extension
 * (sizeof == 0), not valid ISO C.
 */
typedef struct {
} arch_vcpu_info_t;

#endif /* !__ASSEMBLY__ */

#endif /* __XEN_PUBLIC_ARCH_X86_64_H__ */