ia64/xen-unstable

view xen/include/asm-x86/domain.h @ 15828:3b50a7e52ff2

Implement x86 continuable domain destroy.
This patch addresses the following bug report.
http://bugzilla.xensource.com/bugzilla/show_bug.cgi?id=1037
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author kfraser@localhost.localdomain
date Fri Aug 31 17:00:11 2007 +0100 (2007-08-31)
parents 86a154e1ef5d
children e1b574bc36b5
line source
#ifndef __ASM_DOMAIN_H__
#define __ASM_DOMAIN_H__

#include <xen/config.h>
#include <xen/mm.h>
#include <asm/hvm/vcpu.h>
#include <asm/hvm/domain.h>
#include <asm/e820.h>

#define has_32bit_shinfo(d)    ((d)->arch.has_32bit_shinfo)
#define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
#define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
#ifdef __x86_64__
#define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
#else
#define is_pv_32on64_domain(d) (0)
#endif
#define is_pv_32on64_vcpu(v)   (is_pv_32on64_domain((v)->domain))
#define IS_COMPAT(d)           (is_pv_32on64_domain(d))

struct trap_bounce {
    uint32_t      error_code;
    uint8_t       flags; /* TBF_ */
    uint16_t      cs;
    unsigned long eip;
};

#define MAPHASH_ENTRIES 8
#define MAPHASH_HASHFN(pfn) ((pfn) & (MAPHASH_ENTRIES-1))
#define MAPHASHENT_NOTINUSE ((u16)~0U)
struct vcpu_maphash {
    struct vcpu_maphash_entry {
        unsigned long mfn;
        uint16_t      idx;
        uint16_t      refcnt;
    } hash[MAPHASH_ENTRIES];
} __cacheline_aligned;

#define MAPCACHE_ORDER   10
#define MAPCACHE_ENTRIES (1 << MAPCACHE_ORDER)
struct mapcache {
    /* The PTEs that provide the mappings, and a cursor into the array. */
    l1_pgentry_t *l1tab;
    unsigned int cursor;

    /* Protects map_domain_page(). */
    spinlock_t lock;

    /* Garbage mappings are flushed from TLBs in batches called 'epochs'. */
    unsigned int epoch, shadow_epoch[MAX_VIRT_CPUS];
    u32 tlbflush_timestamp;

    /* Which mappings are in use, and which are garbage to reap next epoch? */
    unsigned long inuse[BITS_TO_LONGS(MAPCACHE_ENTRIES)];
    unsigned long garbage[BITS_TO_LONGS(MAPCACHE_ENTRIES)];

    /* Lock-free per-VCPU hash of recently-used mappings. */
    struct vcpu_maphash vcpu_maphash[MAX_VIRT_CPUS];
};
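
/*
 * Illustrative sketch (not part of this header): how map_domain_page() might
 * consult the lock-free per-VCPU maphash before taking the mapcache lock and
 * scanning inuse[]. The helper name and the MAPCACHE_VIRT_START arithmetic are
 * assumptions for illustration, not the real implementation.
 */
#if 0
static void *maphash_fast_path(struct vcpu *v, unsigned long mfn)
{
    struct mapcache *cache = &v->domain->arch.mapcache;
    struct vcpu_maphash_entry *hashent =
        &cache->vcpu_maphash[v->vcpu_id].hash[MAPHASH_HASHFN(mfn)];

    if ( hashent->mfn != mfn )
        return NULL;              /* Miss: fall back to the locked slow path. */

    /* Hit: the entry is private to this VCPU, so no locking is needed. */
    hashent->refcnt++;
    ASSERT(hashent->idx != MAPHASHENT_NOTINUSE);
    return (void *)(MAPCACHE_VIRT_START + (hashent->idx << PAGE_SHIFT));
}
#endif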

extern void mapcache_init(struct domain *);

/* x86/64: toggle guest between kernel and user modes. */
extern void toggle_guest_mode(struct vcpu *);

/*
 * Initialise a hypercall-transfer page. The given pointer must be mapped
 * in Xen virtual address space (accesses are not validated or checked).
 */
extern void hypercall_page_initialise(struct domain *d, void *);
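
/*
 * Illustrative sketch (not part of this header): a caller first maps the
 * target frame into Xen's address space, e.g. with map_domain_page(), before
 * handing the pointer to hypercall_page_initialise(). The wrapper below is a
 * hypothetical example, not an existing Xen function.
 */
#if 0
static void example_init_hypercall_page(struct domain *d, unsigned long mfn)
{
    void *hypercall_page = map_domain_page(mfn); /* now valid in Xen VA space */
    hypercall_page_initialise(d, hypercall_page);
    unmap_domain_page(hypercall_page);
}
#endif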

/************************************************/
/*          shadow paging extension             */
/************************************************/
struct shadow_domain {
    spinlock_t    lock;  /* shadow domain lock */
    int           locker; /* processor which holds the lock */
    const char   *locker_function; /* Func that took it */
    unsigned int  opt_flags;    /* runtime tunable optimizations on/off */
    struct list_head pinned_shadows;

    /* Memory allocation */
    struct list_head freelists[SHADOW_MAX_ORDER + 1];
    struct list_head p2m_freelist;
    unsigned int      total_pages;  /* number of pages allocated */
    unsigned int      free_pages;   /* number of pages on freelists */
    unsigned int      p2m_pages;    /* number of pages allocated to p2m */

    /* 1-to-1 map for use when HVM vcpus have paging disabled */
    pagetable_t unpaged_pagetable;

    /* Shadow hashtable */
    struct shadow_page_info **hash_table;
    int hash_walking;  /* Some function is walking the hash table */

    /* Fast MMIO path heuristic */
    int has_fast_mmio_entries;
};

struct shadow_vcpu {
#if CONFIG_PAGING_LEVELS >= 3
    /* PAE guests: per-vcpu shadow top-level table */
    l3_pgentry_t l3table[4] __attribute__((__aligned__(32)));
    /* PAE guests: per-vcpu cache of the top-level *guest* entries */
    l3_pgentry_t gl3e[4] __attribute__((__aligned__(32)));
#endif
    /* Non-PAE guests: pointer to guest top-level pagetable */
    void *guest_vtable;
    /* Last MFN that we emulated a write to. */
    unsigned long last_emulated_mfn;
    /* MFN of the last shadow that we shot a writeable mapping in */
    unsigned long last_writeable_pte_smfn;
};

/************************************************/
/*          hardware assisted paging            */
/************************************************/
struct hap_domain {
    spinlock_t    lock;
    int           locker;
    const char   *locker_function;

    struct list_head freelist;
    unsigned int      total_pages;  /* number of pages allocated */
    unsigned int      free_pages;   /* number of pages on freelists */
    unsigned int      p2m_pages;    /* number of pages allocated to p2m */
};

/************************************************/
/*                p2m handling                  */
/************************************************/
struct p2m_domain {
    /* Lock that protects updates to the p2m */
    spinlock_t     lock;
    int            locker;   /* processor which holds the lock */
    const char    *locker_function; /* Func that took it */

    /* Pages used to construct the p2m */
    struct list_head pages;

    /* Functions to call to get or free pages for the p2m */
    struct page_info * (*alloc_page)(struct domain *d);
    void               (*free_page)(struct domain *d,
                                    struct page_info *pg);

    /* Highest guest frame that's ever been mapped in the p2m */
    unsigned long max_mapped_pfn;
};
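
/*
 * Illustrative sketch (not part of this header): p2m construction code obtains
 * its pages through the alloc_page hook and keeps them on the 'pages' list so
 * they can be handed back via free_page at teardown. The helper name below is
 * hypothetical.
 */
#if 0
static struct page_info *example_p2m_alloc(struct domain *d)
{
    struct p2m_domain *p2m = &d->arch.p2m;
    struct page_info *pg = p2m->alloc_page(d); /* backed by the paging pool */

    if ( pg != NULL )
        list_add_tail(&pg->list, &p2m->pages); /* remembered for later freeing */
    return pg;
}
#endif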

/************************************************/
/*          common paging data structure        */
/************************************************/
struct log_dirty_domain {
    /* log-dirty lock */
    spinlock_t     lock;
    int            locker; /* processor that holds the lock */
    const char    *locker_function; /* func that took it */

    /* log-dirty bitmap to record dirty pages */
    unsigned long *bitmap;
    unsigned int   bitmap_size;  /* in pages, bit per page */

    /* log-dirty mode stats */
    unsigned int   fault_count;
    unsigned int   dirty_count;

    /* functions which are paging mode specific */
    int            (*enable_log_dirty)(struct domain *d);
    int            (*disable_log_dirty)(struct domain *d);
    void           (*clean_dirty_bitmap)(struct domain *d);
};

struct paging_domain {
    /* flags to control paging operation */
    u32                     mode;
    /* extension for shadow paging support */
    struct shadow_domain    shadow;
    /* extension for hardware-assisted paging */
    struct hap_domain       hap;
    /* log dirty support */
    struct log_dirty_domain log_dirty;
};

struct paging_vcpu {
    /* Pointers to mode-specific entry points. */
    struct paging_mode *mode;
    /* HVM guest: last emulate was to a pagetable */
    unsigned int last_write_was_pt:1;
    /* Translated guest: virtual TLB */
    struct shadow_vtlb *vtlb;
    spinlock_t          vtlb_lock;

    /* paging support extension */
    struct shadow_vcpu shadow;
};

struct arch_domain
{
    l1_pgentry_t *mm_perdomain_pt;
#ifdef CONFIG_X86_64
    l2_pgentry_t *mm_perdomain_l2;
    l3_pgentry_t *mm_perdomain_l3;
#endif

#ifdef CONFIG_X86_32
    /* map_domain_page() mapping cache. */
    struct mapcache mapcache;
#endif

#ifdef CONFIG_COMPAT
    unsigned int hv_compat_vstart;
    l3_pgentry_t *mm_arg_xlat_l3;
#endif

    /* I/O-port admin-specified access capabilities. */
    struct rangeset *ioport_caps;

    struct hvm_domain hvm_domain;

    struct paging_domain paging;
    struct p2m_domain p2m;

    /* Shadow translated domain: P2M mapping */
    pagetable_t phys_table;

    /* Pseudophysical e820 map (XENMEM_memory_map). */
    struct e820entry e820[3];
    unsigned int nr_e820;

    /* Maximum physical-address bitwidth supported by this guest. */
    unsigned int physaddr_bitsize;

    /* Is a 32-bit PV (non-HVM) guest? */
    bool_t is_32bit_pv;
    /* Is shared-info page in 32-bit format? */
    bool_t has_32bit_shinfo;

    /* Continuable domain_relinquish_resources(). */
    enum {
        RELMEM_not_started,
        RELMEM_xen_l4,
        RELMEM_dom_l4,
        RELMEM_xen_l3,
        RELMEM_dom_l3,
        RELMEM_xen_l2,
        RELMEM_dom_l2,
        RELMEM_done,
    } relmem;
    struct list_head relmem_list;
} __cacheline_aligned;
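
/*
 * Illustrative sketch (not part of this header): the relmem state is what
 * makes domain destroy continuable, per the changeset description above.
 * domain_relinquish_resources() can return early (e.g. -EAGAIN) and be
 * re-entered on a later call, resuming where it left off. relinquish_memory()
 * and the list/type arguments below are simplified assumptions, not the exact
 * code in arch/x86/domain.c.
 */
#if 0
static int example_relinquish_resources(struct domain *d)
{
    int ret;

    switch ( d->arch.relmem )
    {
    case RELMEM_not_started:
        d->arch.relmem = RELMEM_xen_l4;
        /* fall through */
    case RELMEM_xen_l4:
        ret = relinquish_memory(d, &d->xenpage_list, PGT_l4_page_table);
        if ( ret )
            return ret;            /* preempted: we will be called again */
        d->arch.relmem = RELMEM_dom_l4;
        /* fall through */
    /* ... RELMEM_dom_l4, RELMEM_xen_l3, ... handled the same way ... */
    default:
        d->arch.relmem = RELMEM_done;
        return 0;
    }
}
#endif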

#ifdef CONFIG_X86_PAE
struct pae_l3_cache {
    /*
     * Two low-memory (<4GB) PAE L3 tables, used as fallback when the guest
     * supplies a >=4GB PAE L3 table. We need two because we cannot set up
     * an L3 table while we are currently running on it (without using
     * expensive atomic 64-bit operations).
     */
    l3_pgentry_t  table[2][4] __attribute__((__aligned__(32)));
    unsigned long high_mfn;  /* The >=4GB MFN being shadowed. */
    unsigned int  inuse_idx; /* Which of the two cache slots is in use? */
    spinlock_t    lock;
};
#define pae_l3_cache_init(c) spin_lock_init(&(c)->lock)
#else /* !CONFIG_X86_PAE */
struct pae_l3_cache { };
#define pae_l3_cache_init(c) ((void)0)
#endif
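
/*
 * Illustrative sketch (not part of this header): when the guest's L3 lives
 * above 4GB, its entries are copied into whichever of the two low-memory slots
 * is *not* currently live, and only then does inuse_idx flip, so the table we
 * are running on is never modified in place. The helper name is hypothetical.
 */
#if 0
static void example_pae_l3_cache_refresh(struct pae_l3_cache *cache,
                                         unsigned long mfn,
                                         const l3_pgentry_t *highmem_l3tab)
{
    unsigned int idx;

    spin_lock(&cache->lock);
    idx = cache->inuse_idx ^ 1;            /* the slot we are NOT running on */
    memcpy(cache->table[idx], highmem_l3tab, 4 * sizeof(l3_pgentry_t));
    cache->high_mfn  = mfn;
    cache->inuse_idx = idx;                /* switch over to the fresh copy  */
    spin_unlock(&cache->lock);
}
#endif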

struct arch_vcpu
{
    /* Needs 16-byte alignment for FXSAVE/FXRSTOR. */
    struct vcpu_guest_context guest_context
    __attribute__((__aligned__(16)));

    struct pae_l3_cache pae_l3_cache;

    unsigned long flags; /* TF_ */

    void (*schedule_tail) (struct vcpu *);

    void (*ctxt_switch_from) (struct vcpu *);
    void (*ctxt_switch_to) (struct vcpu *);

    /* Record information required to continue execution after migration */
    void *continue_info;

    /* Bounce information for propagating an exception to guest OS. */
    struct trap_bounce trap_bounce;

    /* I/O-port access bitmap. */
    XEN_GUEST_HANDLE(uint8_t) iobmp; /* Guest kernel virtual address of the bitmap. */
    int iobmp_limit;  /* Number of ports represented in the bitmap.  */
    int iopl;         /* Current IOPL for this VCPU. */

#ifdef CONFIG_X86_32
    struct desc_struct int80_desc;
#endif
#ifdef CONFIG_X86_64
    struct trap_bounce int80_bounce;
#endif

    /* Virtual Machine Extensions */
    struct hvm_vcpu hvm_vcpu;

    /*
     * Every domain has an L1 pagetable of its own. Per-domain mappings
     * are put in this table (e.g. the current GDT is mapped here).
     */
    l1_pgentry_t *perdomain_ptes;

#ifdef CONFIG_X86_64
    pagetable_t guest_table_user;       /* (MFN) x86/64 user-space pagetable */
#endif
    pagetable_t guest_table;            /* (MFN) guest notion of cr3 */
    /* guest_table holds a ref to the page, and also a type-count unless
     * shadow refcounts are in use */
    pagetable_t shadow_table[4];        /* (MFN) shadow(s) of guest */
    pagetable_t monitor_table;          /* (MFN) hypervisor PT (for HVM) */
    unsigned long cr3;                  /* (MA) value to install in HW CR3 */

    /* Current LDT details. */
    unsigned long shadow_ldt_mapcnt;

    struct paging_vcpu paging;

    /* Guest-specified relocation of vcpu_info. */
    unsigned long vcpu_info_mfn;
} __cacheline_aligned;

/* Shorthands to improve code legibility. */
#define hvm_vmx         hvm_vcpu.u.vmx
#define hvm_svm         hvm_vcpu.u.svm

/* Continue the current hypercall via func(data) on specified cpu. */
int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data);
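
/*
 * Illustrative sketch (not part of this header): a hypercall handler can defer
 * work that must run on a particular CPU; func(data) completes the current
 * hypercall there and its return value becomes the hypercall's result. The
 * function names below are hypothetical.
 */
#if 0
static long example_do_work(void *data)
{
    struct domain *d = data;
    /* ... heavy lifting that must run on CPU 0 ... */
    (void)d;
    return 0;
}

static long example_hypercall_tail(struct domain *d)
{
    /* Re-issue the remainder of the current hypercall as example_do_work(d). */
    return continue_hypercall_on_cpu(0, example_do_work, d);
}
#endif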

#endif /* __ASM_DOMAIN_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */