ia64/xen-unstable

view xen/include/asm-x86/domain.h @ 13915:a00b8d3800a8

[XEN] Snapshot PAE l3es when they are shadowed.
We don't update the shadows so we mustn't look at the guest l3es
or we'll be confused by them if they change.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Wed Feb 14 14:46:18 2007 +0000 (2007-02-14)
parents 6daa91dc9247
children 1e5a83fb928b
line source
1 #ifndef __ASM_DOMAIN_H__
2 #define __ASM_DOMAIN_H__
4 #include <xen/config.h>
5 #include <xen/mm.h>
6 #include <asm/hvm/vcpu.h>
7 #include <asm/hvm/domain.h>
8 #include <asm/e820.h>
/*
 * Pending exception/interrupt state to be reflected ("bounced") into a
 * PV guest when we next return to guest context.
 */
struct trap_bounce {
    unsigned long error_code;  /* Error code to push, per TBF_ flags. */
    unsigned short flags;      /* TBF_ */
    unsigned short cs;         /* Guest code segment of the handler. */
    unsigned long eip;         /* Guest handler entry point. */
};
/* Per-VCPU hash of recently-used map_domain_page() mappings. */
#define MAPHASH_ENTRIES 8
#define MAPHASH_HASHFN(pfn) ((pfn) & (MAPHASH_ENTRIES-1)) /* power-of-2 mask */
#define MAPHASHENT_NOTINUSE ((u16)~0U)  /* 'idx' sentinel: entry is empty */
struct vcpu_maphash {
    struct vcpu_maphash_entry {
        unsigned long mfn; /* Machine frame this entry maps. */
        uint16_t      idx; /* Mapcache slot index, or MAPHASHENT_NOTINUSE. */
        uint16_t      refcnt; /* Outstanding uses of this cached mapping. */
    } hash[MAPHASH_ENTRIES];
} __cacheline_aligned;
/* Per-domain cache of transient mappings used by map_domain_page(). */
#define MAPCACHE_ORDER 10
#define MAPCACHE_ENTRIES (1 << MAPCACHE_ORDER)
struct mapcache {
    /* The PTEs that provide the mappings, and a cursor into the array. */
    l1_pgentry_t *l1tab;
    unsigned int cursor;

    /* Protects map_domain_page(). */
    spinlock_t lock;

    /* Garbage mappings are flushed from TLBs in batches called 'epochs'. */
    unsigned int epoch, shadow_epoch[MAX_VIRT_CPUS];
    /* NOTE(review): presumably the tlbflush clock at the last epoch flush,
     * used to decide whether a CPU still needs a flush — confirm in the
     * map_domain_page() implementation. */
    u32 tlbflush_timestamp;

    /* Which mappings are in use, and which are garbage to reap next epoch? */
    unsigned long inuse[BITS_TO_LONGS(MAPCACHE_ENTRIES)];
    unsigned long garbage[BITS_TO_LONGS(MAPCACHE_ENTRIES)];

    /* Lock-free per-VCPU hash of recently-used mappings. */
    struct vcpu_maphash vcpu_maphash[MAX_VIRT_CPUS];
};
/* Initialise a domain's map_domain_page() mapping cache. */
extern void mapcache_init(struct domain *);

/* x86/64: toggle guest between kernel and user modes. */
extern void toggle_guest_mode(struct vcpu *);

/*
 * Initialise a hypercall-transfer page. The given pointer must be mapped
 * in Xen virtual address space (accesses are not validated or checked).
 */
extern void hypercall_page_initialise(struct domain *d, void *);
/************************************************/
/*          shadow paging extension             */
/************************************************/
/* Per-domain state for the shadow pagetable subsystem. */
struct shadow_domain {
    spinlock_t lock;  /* shadow domain lock */
    int locker; /* processor which holds the lock */
    const char *locker_function; /* Func that took it */
    unsigned int opt_flags;    /* runtime tunable optimizations on/off */
    struct list_head pinned_shadows; /* Shadows kept alive while unreferenced. */

    /* Memory allocation */
    struct list_head freelists[SHADOW_MAX_ORDER + 1]; /* Free pages, by order. */
    struct list_head p2m_freelist;
    unsigned int      total_pages;  /* number of pages allocated */
    unsigned int      free_pages;   /* number of pages on freelists */
    unsigned int      p2m_pages;    /* number of pages allocated to p2m */

    /* Shadow hashtable */
    struct shadow_page_info **hash_table;
    int hash_walking;  /* Some function is walking the hash table */

    /* Shadow log-dirty bitmap */
    unsigned long *dirty_bitmap;
    unsigned int dirty_bitmap_size;  /* in pages, bit per page */

    /* Shadow log-dirty mode stats */
    unsigned int fault_count;
    unsigned int dirty_count;
};
/* Per-VCPU state for the shadow pagetable subsystem. */
struct shadow_vcpu {
#if CONFIG_PAGING_LEVELS >= 3
    /* PAE guests: per-vcpu shadow top-level table */
    l3_pgentry_t l3table[4] __attribute__((__aligned__(32)));
    /* PAE guests: per-vcpu cache of the top-level *guest* entries.
     * Snapshot taken when the l3 is shadowed; the shadows are not updated
     * in place, so the live guest entries must not be consulted. */
    l3_pgentry_t gl3e[4] __attribute__((__aligned__(32)));
#endif
    /* Non-PAE guests: pointer to guest top-level pagetable */
    void *guest_vtable;
    /* Last MFN that we emulated a write to. */
    unsigned long last_emulated_mfn;
    /* MFN of the last shadow that we shot a writeable mapping in */
    unsigned long last_writeable_pte_smfn;
};
/************************************************/
/*              p2m handling                    */
/************************************************/
/* Per-domain physical-to-machine (guest pfn -> mfn) translation state. */
struct p2m_domain {
    /* Lock that protects updates to the p2m */
    spinlock_t lock;
    int locker;   /* processor which holds the lock */
    const char *locker_function; /* Func that took it */

    /* Pages used to construct the p2m */
    struct list_head pages;

    /* Functions to call to get or free pages for the p2m */
    struct page_info * (*alloc_page)(struct domain *d);
    void               (*free_page)(struct domain *d,
                                    struct page_info *pg);

    /* Highest guest frame that's ever been mapped in the p2m */
    unsigned long max_mapped_pfn;
};
/************************************************/
/*          common paging data structure        */
/************************************************/
/* Per-domain paging-assistance state; wraps the mode-specific structs. */
struct paging_domain {
    u32 mode;  /* flags to control paging operation */

    /* extension for shadow paging support */
    struct shadow_domain shadow;

    /* Other paging assistance code will have structs here */
};
/* Per-VCPU paging-assistance state. */
struct paging_vcpu {
    /* Pointers to mode-specific entry points. */
    struct paging_mode *mode;
    /* HVM guest: paging enabled (CR0.PG)? */
    unsigned int translate_enabled:1;

    /* paging support extension */
    struct shadow_vcpu shadow;
};
/* x86-specific per-domain state. */
struct arch_domain
{
    /* L1 table backing the per-domain mapping area. */
    l1_pgentry_t *mm_perdomain_pt;
#ifdef CONFIG_X86_64
    /* x86/64: higher-level tables that map mm_perdomain_pt. */
    l2_pgentry_t *mm_perdomain_l2;
    l3_pgentry_t *mm_perdomain_l3;
#endif

#ifdef CONFIG_X86_32
    /* map_domain_page() mapping cache. */
    struct mapcache mapcache;
#endif

#ifdef CONFIG_COMPAT
    /* NOTE(review): presumably the start of the hypervisor area in a
     * compat (32-on-64) guest's virtual address space — confirm. */
    unsigned int hv_compat_vstart;
    /* L3 table used for compat hypercall argument translation. */
    l3_pgentry_t *mm_arg_xlat_l3;
#endif

    /* I/O-port admin-specified access capabilities. */
    struct rangeset *ioport_caps;

    struct hvm_domain hvm_domain;

    struct paging_domain paging;
    struct p2m_domain p2m ;

    /* Shadow translated domain: P2M mapping */
    pagetable_t phys_table;

    /* Pseudophysical e820 map (XENMEM_memory_map). */
    struct e820entry e820[3];
    unsigned int nr_e820;
} __cacheline_aligned;
#ifdef CONFIG_X86_PAE
struct pae_l3_cache {
    /*
     * Two low-memory (<4GB) PAE L3 tables, used as fallback when the guest
     * supplies a >=4GB PAE L3 table. We need two because we cannot set up
     * an L3 table while we are currently running on it (without using
     * expensive atomic 64-bit operations).
     */
    l3_pgentry_t  table[2][4] __attribute__((__aligned__(32)));
    unsigned long high_mfn;  /* The >=4GB MFN being shadowed. */
    unsigned int  inuse_idx; /* Which of the two cache slots is in use? */
    spinlock_t    lock;
};
#define pae_l3_cache_init(c) spin_lock_init(&(c)->lock)
#else /* !CONFIG_X86_PAE */
/* Non-PAE builds: empty struct so callers need no #ifdefs. */
struct pae_l3_cache { };
#define pae_l3_cache_init(c) ((void)0)
#endif
/* x86-specific per-VCPU state. */
struct arch_vcpu
{
    /* Needs 16-byte alignment for FXSAVE/FXRSTOR. */
    struct vcpu_guest_context guest_context
    __attribute__((__aligned__(16)));

    struct pae_l3_cache pae_l3_cache;

    unsigned long      flags; /* TF_ */

    /* Hooks invoked around context switch; set per guest type. */
    void (*schedule_tail) (struct vcpu *);

    void (*ctxt_switch_from) (struct vcpu *);
    void (*ctxt_switch_to) (struct vcpu *);

    /* Bounce information for propagating an exception to guest OS. */
    struct trap_bounce trap_bounce;

    /* I/O-port access bitmap. */
    XEN_GUEST_HANDLE(uint8_t) iobmp; /* Guest kernel virtual address of the bitmap. */
    int iobmp_limit;  /* Number of ports represented in the bitmap.  */
    int iopl;         /* Current IOPL for this VCPU. */

#ifdef CONFIG_X86_32
    /* Per-VCPU trap descriptor for the int80 direct-trap path. */
    struct desc_struct int80_desc;
#endif

    /* Virtual Machine Extensions */
    struct hvm_vcpu hvm_vcpu;

    /*
     * Every domain has a L1 pagetable of its own. Per-domain mappings
     * are put in this table (eg. the current GDT is mapped here).
     */
    l1_pgentry_t *perdomain_ptes;

#ifdef CONFIG_X86_64
    pagetable_t guest_table_user;       /* (MFN) x86/64 user-space pagetable */
#endif
    pagetable_t guest_table;            /* (MFN) guest notion of cr3 */
    /* guest_table holds a ref to the page, and also a type-count unless
     * shadow refcounts are in use */
    pagetable_t shadow_table[4];        /* (MFN) shadow(s) of guest */
    pagetable_t monitor_table;          /* (MFN) hypervisor PT (for HVM) */
    unsigned long cr3;                  /* (MA) value to install in HW CR3 */

    /* Current LDT details. */
    unsigned long shadow_ldt_mapcnt;

    struct paging_vcpu paging;
} __cacheline_aligned;
/* Shorthands to improve code legibility. */
#define hvm_vmx         hvm_vcpu.u.vmx
#define hvm_svm         hvm_vcpu.u.svm

#endif /* __ASM_DOMAIN_H__ */
261 /*
262 * Local variables:
263 * mode: C
264 * c-set-style: "BSD"
265 * c-basic-offset: 4
266 * tab-width: 4
267 * indent-tabs-mode: nil
268 * End:
269 */