ia64/xen-unstable

view xen/include/public/arch-x86_64.h @ 5704:9b73afea874e

Certain types of event channel are now auto-bound to vcpu0 by Xen.
Make sure that xenolinux agrees with this.

author    sos22@douglas.cl.cam.ac.uk
date      Fri Jul 08 15:35:43 2005 +0000
parents   2b6c1a809807
children  0608852073c8 631cc5dc3e8a eb9df2c3a478 f51fe43c5d1c 6783e59e1c45 8799d14bef77

/******************************************************************************
 * arch-x86_64.h
 *
 * Guest OS interface to x86 64-bit Xen.
 *
 * Copyright (c) 2004, K A Fraser
 */

#ifndef __XEN_PUBLIC_ARCH_X86_64_H__
#define __XEN_PUBLIC_ARCH_X86_64_H__

/*
 * SEGMENT DESCRIPTOR TABLES
 */
/*
 * A number of GDT entries are reserved by Xen. These are not situated at the
 * start of the GDT because some stupid OSes export hard-coded selector values
 * in their ABI. These hard-coded values are always near the start of the GDT,
 * so Xen places itself out of the way, at the far end of the GDT.
 */
#define FIRST_RESERVED_GDT_PAGE  14
#define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096)
#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
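
/*
 * Worked example (illustrative, not part of the interface): with
 * FIRST_RESERVED_GDT_PAGE == 14, the reserved region starts at byte offset
 * 14 * 4096 == 57344 == 0xe000 into the GDT, i.e. at descriptor index
 * 57344 / 8 == 7168, leaving entries 0-7167 free for the guest OS.
 */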

/*
 * 64-bit segment selectors
 * These flat segments are in the Xen-private section of every GDT. Since these
 * are also present in the initial GDT, many OSes will be able to avoid
 * installing their own GDT.
 */

#define FLAT_RING3_CS32 0xe023  /* GDT index 7172 */
#define FLAT_RING3_CS64 0xe033  /* GDT index 7174 */
#define FLAT_RING3_DS32 0xe02b  /* GDT index 7173 */
#define FLAT_RING3_DS64 0x0000  /* NULL selector   */
#define FLAT_RING3_SS32 0xe02b  /* GDT index 7173 */
#define FLAT_RING3_SS64 0xe02b  /* GDT index 7173 */
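
/*
 * For reference (standard x86 selector layout, not specific to Xen): bits 0-1
 * of a selector hold the RPL and bit 2 the TI flag, so the GDT index is the
 * selector value shifted right by 3. For example, FLAT_RING3_CS32 >> 3 ==
 * 0xe023 >> 3 == 7172, which lies just above FIRST_RESERVED_GDT_ENTRY.
 */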

#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
#define FLAT_KERNEL_DS   FLAT_KERNEL_DS64
#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
#define FLAT_KERNEL_CS   FLAT_KERNEL_CS64
#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
#define FLAT_KERNEL_SS   FLAT_KERNEL_SS64

#define FLAT_USER_DS64 FLAT_RING3_DS64
#define FLAT_USER_DS32 FLAT_RING3_DS32
#define FLAT_USER_DS   FLAT_USER_DS64
#define FLAT_USER_CS64 FLAT_RING3_CS64
#define FLAT_USER_CS32 FLAT_RING3_CS32
#define FLAT_USER_CS   FLAT_USER_CS64
#define FLAT_USER_SS64 FLAT_RING3_SS64
#define FLAT_USER_SS32 FLAT_RING3_SS32
#define FLAT_USER_SS   FLAT_USER_SS64

/* And the trap vector is... */
#define TRAP_INSTR "syscall"

#ifndef HYPERVISOR_VIRT_START
#define HYPERVISOR_VIRT_START (0xFFFF800000000000UL)
#define HYPERVISOR_VIRT_END   (0xFFFF880000000000UL)
#endif

/* Maximum number of virtual CPUs in multi-processor guests. */
#define MAX_VIRT_CPUS 32

#ifndef __ASSEMBLY__

/* The machine->physical mapping table starts at this address, read-only. */
#ifndef machine_to_phys_mapping
#define machine_to_phys_mapping ((u32 *)HYPERVISOR_VIRT_START)
#endif
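
/*
 * Illustrative sketch (not part of this interface): a paravirtualised guest
 * can translate a machine frame number into its pseudo-physical frame number
 * by indexing the read-only table above. The helper name is hypothetical;
 * real guests typically wrap this in their own mfn_to_pfn().
 */
static inline unsigned long example_mfn_to_pfn(unsigned long mfn)
{
    return (unsigned long)machine_to_phys_mapping[mfn];
}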

/*
 * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
 * @which == SEGBASE_* ; @base == 64-bit base address
 * Returns 0 on success.
 */
#define SEGBASE_FS          0
#define SEGBASE_GS_USER     1
#define SEGBASE_GS_KERNEL   2
#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
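
/*
 * Illustrative usage (assumes the guest OS supplies a hypercall wrapper with
 * the prototype documented above; tls_base and report_error() are
 * hypothetical):
 *
 *     if ( HYPERVISOR_set_segment_base(SEGBASE_FS, tls_base) != 0 )
 *         report_error();
 *
 * SEGBASE_GS_USER_SEL is the odd one out: it loads a 16-bit selector into the
 * user %gs (from base[15:0]) rather than setting a 64-bit base address.
 */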

/*
 * int HYPERVISOR_switch_to_user(void)
 * All arguments are on the kernel stack, in the following format.
 * Never returns if successful. Current kernel context is lost.
 * If flags contains VGCF_IN_SYSCALL:
 *   Restore RAX, RIP, RFLAGS, RSP.
 *   Discard R11, RCX, CS, SS.
 * Otherwise:
 *   Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
 * All other registers are saved on hypercall entry and restored to user.
 */
/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
#define VGCF_IN_SYSCALL (1<<8)
struct switch_to_user {
    /* Top of stack (%rsp at point of hypercall). */
    u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
    /* Bottom of switch_to_user stack frame. */
};
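
/*
 * Illustrative sketch (hypothetical guest code, not part of this interface):
 * before invoking the switch_to_user hypercall the kernel lays out a
 * struct switch_to_user at the top of its stack, e.g. when returning from a
 * syscall. kernel_stack_top and the user_* values are hypothetical.
 *
 *     struct switch_to_user *stu =
 *         (struct switch_to_user *)kernel_stack_top - 1;
 *     stu->rax    = return_value;
 *     stu->flags  = VGCF_IN_SYSCALL;  Xen restores RAX/RIP/RFLAGS/RSP and
 *                                     discards R11/RCX/CS/SS, per the comment
 *                                     above.
 *     stu->rip    = user_rip;
 *     stu->rflags = user_rflags;
 *     stu->rsp    = user_rsp;
 *     With %rsp pointing at stu, issue HYPERVISOR_switch_to_user().
 */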

/* NB. The following is 64 bits wide. */
typedef unsigned long memory_t;   /* Full-sized pointer/address/memory-size. */

/*
 * Send an array of these to HYPERVISOR_set_trap_table().
 * N.B. As in x86/32 mode, the privilege level specifies which modes may enter
 * a trap via a software interrupt. Since rings 1 and 2 are unavailable, we
 * allocate privilege levels as follows:
 *  Level == 0: No one may enter
 *  Level == 1: Kernel may enter
 *  Level == 2: Kernel may enter
 *  Level == 3: Everyone may enter
 */
#define TI_GET_DPL(_ti)      ((_ti)->flags & 3)
#define TI_GET_IF(_ti)       ((_ti)->flags & 4)
#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
#define TI_SET_IF(_ti,_if)   ((_ti)->flags |= ((!!(_if))<<2))
typedef struct trap_info {
    u8       vector;  /* exception vector                             */
    u8       flags;   /* 0-3: privilege level; 4: clear event enable? */
    u16      cs;      /* code selector                                */
    memory_t address; /* code address                                 */
} trap_info_t;
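
/*
 * Illustrative sketch (hypothetical guest code): installing a breakpoint
 * handler that user space is allowed to trigger. The int3_handler symbol and
 * the HYPERVISOR_set_trap_table() wrapper belong to the guest OS, not to this
 * header, and the zero-terminated array follows the usual guest convention.
 *
 *     trap_info_t traps[2];
 *     memset(traps, 0, sizeof(traps));     traps[1] stays zeroed: terminator
 *     traps[0].vector  = 3;                exception vector 3 is #BP
 *     traps[0].cs      = FLAT_KERNEL_CS;
 *     traps[0].address = (memory_t)int3_handler;
 *     TI_SET_DPL(&traps[0], 3);            ring 3 may enter via "int $3"
 *     HYPERVISOR_set_trap_table(traps);
 */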

typedef struct cpu_user_regs {
    u64 r15;
    u64 r14;
    u64 r13;
    u64 r12;
    union { u64 rbp, ebp; };
    union { u64 rbx, ebx; };
    u64 r11;
    u64 r10;
    u64 r9;
    u64 r8;
    union { u64 rax, eax; };
    union { u64 rcx, ecx; };
    union { u64 rdx, edx; };
    union { u64 rsi, esi; };
    union { u64 rdi, edi; };
    u32 error_code;    /* private */
    u32 entry_vector;  /* private */
    union { u64 rip, eip; };
    u16 cs, _pad0[1];
    u8  saved_upcall_mask;
    u8  _pad1[3];
    union { u64 rflags, eflags; };
    union { u64 rsp, esp; };
    u16 ss, _pad2[3];
    u16 es, _pad3[3];
    u16 ds, _pad4[3];
    u16 fs, _pad5[3]; /* Non-zero => takes precedence over fs_base.      */
    u16 gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_user. */
} cpu_user_regs_t;

typedef u64 tsc_timestamp_t; /* RDTSC timestamp */

/*
 * The following is all CPU context. Note that the fpu_ctxt block is filled
 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
 */
typedef struct vcpu_guest_context {
    /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
    struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers     */
#define VGCF_I387_VALID (1<<0)
#define VGCF_VMX_GUEST  (1<<1)
#define VGCF_IN_KERNEL  (1<<2)
    unsigned long flags;                    /* VGCF_* flags                 */
    cpu_user_regs_t user_regs;              /* User-level CPU registers     */
    trap_info_t   trap_ctxt[256];           /* Virtual IDT                  */
    unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
    unsigned long ctrlreg[8];               /* CR0-CR7 (control registers)  */
    unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
    unsigned long event_callback_eip;
    unsigned long failsafe_callback_eip;
    unsigned long syscall_callback_eip;
    unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
    /* Segment base addresses. */
    u64 fs_base;
    u64 gs_base_kernel;
    u64 gs_base_user;
} vcpu_guest_context_t;
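
/*
 * Illustrative sketch (hypothetical guest code): roughly what a guest's SMP
 * bring-up path fills in before asking Xen to start a new vcpu with this
 * structure. The start_* values and the callback symbols stand for whatever
 * entry points the guest itself provides; nothing here is mandated by this
 * header beyond the field names.
 *
 *     vcpu_guest_context_t ctxt;
 *     memset(&ctxt, 0, sizeof(ctxt));
 *     ctxt.flags         = VGCF_IN_KERNEL;
 *     ctxt.user_regs.cs  = FLAT_KERNEL_CS;
 *     ctxt.user_regs.ss  = FLAT_KERNEL_SS;
 *     ctxt.user_regs.rip = start_rip;
 *     ctxt.user_regs.rsp = start_rsp;
 *     ctxt.kernel_ss     = FLAT_KERNEL_SS;
 *     ctxt.kernel_sp     = start_rsp;
 *     ctxt.ctrlreg[3]    = start_cr3;   machine address of the top-level
 *                                       page table
 *     ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
 *     ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
 *     ctxt.syscall_callback_eip  = (unsigned long)system_call;
 */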
typedef struct arch_shared_info {
    /* MFN of a table of MFNs that make up p2m table */
    u64 pfn_to_mfn_frame_list;
} arch_shared_info_t;

#endif /* !__ASSEMBLY__ */

#endif