ia64/xen-unstable: xen/include/asm-x86/domain.h @ 12564:2fd223c64fc6

[XEN] Pin l3 shadows of older x86_64 linux guests.
Older x86_64 linux kernels use one l4 table per cpu and context switch by
changing an l4 entry pointing to an l3 table. If we're shadowing them
we need to pin l3 shadows to stop them being torn down on every
context switch. (But don't do this for normal 64bit guests).
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Thu Nov 23 17:46:52 2006 +0000 (2006-11-23)
parents 7a38b70788a5
children 5c82a274733e
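As background, here is a minimal sketch of the guest-side pattern this change targets (not actual Linux or Xen code; every name below is illustrative). The kernels in question keep one l4 table per CPU and switch tasks by rewriting a single l4 slot to point at the next task's l3 table, so the l4 base loaded in hardware never changes:

    /* Hypothetical per-CPU l4 table; only one slot changes on context switch. */
    static l4_pgentry_t *cpu_l4_table;

    static void switch_to(struct task *next)
    {
        /* Repoint the per-CPU l4 entry at the next task's l3 table. */
        cpu_l4_table[TASK_L4_SLOT] = make_l4_entry(next->l3_mfn);
    }

When that slot is repointed, the shadow of the previously referenced l3 would lose its last reference and be freed, only to be rebuilt on the next switch back; pinning l3 shadows keeps them alive across such switches. The pinned_shadows list in struct shadow_domain below is where pinned shadows are kept.
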
#ifndef __ASM_DOMAIN_H__
#define __ASM_DOMAIN_H__

#include <xen/config.h>
#include <xen/mm.h>
#include <asm/hvm/vcpu.h>
#include <asm/hvm/domain.h>

struct trap_bounce {
    unsigned long error_code;
    unsigned short flags; /* TBF_ */
    unsigned short cs;
    unsigned long eip;
};

#define MAPHASH_ENTRIES 8
#define MAPHASH_HASHFN(pfn) ((pfn) & (MAPHASH_ENTRIES-1))
#define MAPHASHENT_NOTINUSE ((u16)~0U)
struct vcpu_maphash {
    struct vcpu_maphash_entry {
        unsigned long mfn;
        uint16_t idx;
        uint16_t refcnt;
    } hash[MAPHASH_ENTRIES];
} __cacheline_aligned;
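
/*
 * (Illustrative note, not part of the original header.)  MAPHASH_HASHFN()
 * simply masks the low bits of the frame number, so a given frame always
 * lands in the same one of the MAPHASH_ENTRIES slots, and a lookup can be
 * as cheap as (field names from the structure above, the rest hypothetical):
 *
 *     struct vcpu_maphash_entry *ent = &maphash->hash[MAPHASH_HASHFN(mfn)];
 *     if ( ent->mfn == mfn )
 *         ent->refcnt++;      // hit: reuse the cached mapping at ent->idx
 *     else
 *         // miss: fall back to the shared mapcache below
 *
 * MAPHASHENT_NOTINUSE is stored in 'idx' to mark an empty slot.
 */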

#define MAPCACHE_ORDER 10
#define MAPCACHE_ENTRIES (1 << MAPCACHE_ORDER)
struct mapcache {
    /* The PTEs that provide the mappings, and a cursor into the array. */
    l1_pgentry_t *l1tab;
    unsigned int cursor;

    /* Protects map_domain_page(). */
    spinlock_t lock;

    /* Garbage mappings are flushed from TLBs in batches called 'epochs'. */
    unsigned int epoch, shadow_epoch[MAX_VIRT_CPUS];
    u32 tlbflush_timestamp;

    /* Which mappings are in use, and which are garbage to reap next epoch? */
    unsigned long inuse[BITS_TO_LONGS(MAPCACHE_ENTRIES)];
    unsigned long garbage[BITS_TO_LONGS(MAPCACHE_ENTRIES)];

    /* Lock-free per-VCPU hash of recently-used mappings. */
    struct vcpu_maphash vcpu_maphash[MAX_VIRT_CPUS];
};
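
/*
 * (Illustrative note, not part of the original header.)  Roughly how the
 * epoch scheme is meant to work: unmapping a page only sets the slot's bit
 * in 'garbage'; the expensive TLB flush is deferred until the allocator
 * runs out of clear bits in 'inuse', e.g.:
 *
 *     idx = find_next_zero_bit(cache->inuse, MAPCACHE_ENTRIES, cache->cursor);
 *     if ( idx >= MAPCACHE_ENTRIES )
 *     {
 *         // Start a new epoch: flush TLBs once, then every slot marked in
 *         // 'garbage' may be reused.
 *         cache->epoch++;
 *     }
 *
 * Per-VCPU 'shadow_epoch' records the last epoch each VCPU has observed, so
 * a VCPU can flush its local TLB the first time it sees a newer epoch.  The
 * control flow above is simplified.
 */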

extern void mapcache_init(struct domain *);

/* x86/64: toggle guest between kernel and user modes. */
extern void toggle_guest_mode(struct vcpu *);

/*
 * Initialise a hypercall-transfer page. The given pointer must be mapped
 * in Xen virtual address space (accesses are not validated or checked).
 */
extern void hypercall_page_initialise(struct domain *d, void *);

struct shadow_domain {
    u32 mode; /* flags to control shadow operation */
    spinlock_t lock; /* shadow domain lock */
    int locker; /* processor which holds the lock */
    const char *locker_function; /* Func that took it */
    struct list_head freelists[SHADOW_MAX_ORDER + 1];
    struct list_head p2m_freelist;
    struct list_head p2m_inuse;
    struct list_head pinned_shadows;
    unsigned int total_pages; /* number of pages allocated */
    unsigned int free_pages; /* number of pages on freelists */
    unsigned int p2m_pages; /* number of pages in p2m map */
    unsigned int opt_flags; /* runtime tunable optimizations on/off */

    /* Shadow hashtable */
    struct shadow_page_info **hash_table;
    int hash_walking; /* Some function is walking the hash table */

    /* Shadow log-dirty bitmap */
    unsigned long *dirty_bitmap;
    unsigned int dirty_bitmap_size; /* in pages, bit per page */

    /* Shadow log-dirty mode stats */
    unsigned int fault_count;
    unsigned int dirty_count;
};

struct arch_domain
{
    l1_pgentry_t *mm_perdomain_pt;
#ifdef CONFIG_X86_64
    l2_pgentry_t *mm_perdomain_l2;
    l3_pgentry_t *mm_perdomain_l3;
#endif

#ifdef CONFIG_X86_32
    /* map_domain_page() mapping cache. */
    struct mapcache mapcache;
#endif

    /* I/O-port admin-specified access capabilities. */
    struct rangeset *ioport_caps;

    /* HVM stuff */
    struct hvm_domain hvm_domain;

    /* Shadow-translated guest: Pseudophys base address of reserved area. */
    unsigned long first_reserved_pfn;

    struct shadow_domain shadow;

    /* Shadow translated domain: P2M mapping */
    pagetable_t phys_table;
    /* Highest guest frame that's ever been mapped in the p2m */
    unsigned long max_mapped_pfn;

} __cacheline_aligned;

#ifdef CONFIG_X86_PAE
struct pae_l3_cache {
    /*
     * Two low-memory (<4GB) PAE L3 tables, used as fallback when the guest
     * supplies a >=4GB PAE L3 table. We need two because we cannot set up
     * an L3 table while we are currently running on it (without using
     * expensive atomic 64-bit operations).
     */
    l3_pgentry_t table[2][4] __attribute__((__aligned__(32)));
    unsigned long high_mfn; /* The >=4GB MFN being shadowed. */
    unsigned int inuse_idx; /* Which of the two cache slots is in use? */
    spinlock_t lock;
};
#define pae_l3_cache_init(c) spin_lock_init(&(c)->lock)
#else /* !CONFIG_X86_PAE */
struct pae_l3_cache { };
#define pae_l3_cache_init(c) ((void)0)
#endif
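
/*
 * (Illustrative note, not part of the original header.)  A PAE CR3 can only
 * point at a page-directory-pointer table located below 4GB, so when the
 * guest's L3 sits above 4GB its four entries are copied into one of the two
 * low tables above.  The copy goes into the slot that is not currently live,
 * then the cache flips over; roughly (names other than the struct fields are
 * hypothetical):
 *
 *     spin_lock(&cache->lock);
 *     idx = cache->inuse_idx ^ 1;     // write into the idle slot
 *     memcpy(cache->table[idx], guest_l3, sizeof(cache->table[idx]));
 *     cache->high_mfn  = guest_l3_mfn;
 *     cache->inuse_idx = idx;         // then switch over to it
 *     spin_unlock(&cache->lock);
 */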

struct shadow_vcpu {
#if CONFIG_PAGING_LEVELS >= 3
    /* PAE guests: per-vcpu shadow top-level table */
    l3_pgentry_t l3table[4] __attribute__((__aligned__(32)));
#endif
    /* Pointers to mode-specific entry points. */
    struct shadow_paging_mode *mode;
    /* Last MFN that we emulated a write to. */
    unsigned long last_emulated_mfn;
    /* MFN of the last shadow that we shot a writeable mapping in. */
    unsigned long last_writeable_pte_smfn;
    /* HVM guest: paging enabled (CR0.PG)? */
    unsigned int translate_enabled:1;
    /* Emulated fault needs to be propagated to guest? */
    unsigned int propagate_fault:1;
};

struct arch_vcpu
{
    /* Needs 16-byte alignment for FXSAVE/FXRSTOR. */
    struct vcpu_guest_context guest_context
    __attribute__((__aligned__(16)));

    struct pae_l3_cache pae_l3_cache;

    unsigned long flags; /* TF_ */

    void (*schedule_tail) (struct vcpu *);

    void (*ctxt_switch_from) (struct vcpu *);
    void (*ctxt_switch_to) (struct vcpu *);

    /* Bounce information for propagating an exception to guest OS. */
    struct trap_bounce trap_bounce;

    /* I/O-port access bitmap. */
    XEN_GUEST_HANDLE(uint8_t) iobmp; /* Guest kernel virtual address of the bitmap. */
    int iobmp_limit; /* Number of ports represented in the bitmap. */
    int iopl; /* Current IOPL for this VCPU. */

#ifdef CONFIG_X86_32
    struct desc_struct int80_desc;
#endif

    /* Virtual Machine Extensions */
    struct hvm_vcpu hvm_vcpu;

    /*
     * Every domain has an L1 pagetable of its own. Per-domain mappings
     * are put in this table (e.g. the current GDT is mapped here).
     */
    l1_pgentry_t *perdomain_ptes;

#ifdef CONFIG_X86_64
    pagetable_t guest_table_user; /* (MFN) x86/64 user-space pagetable */
#endif
    pagetable_t guest_table; /* (MFN) guest notion of cr3 */
    /* guest_table holds a ref to the page, and also a type-count unless
     * shadow refcounts are in use */
    pagetable_t shadow_table[4]; /* (MFN) shadow(s) of guest */
    pagetable_t monitor_table; /* (MFN) hypervisor PT (for HVM) */
    unsigned long cr3; /* (MA) value to install in HW CR3 */

    void *guest_vtable; /* virtual addr of pagetable */
    root_pgentry_t *monitor_vtable; /* virtual addr of monitor_table */

    /* Current LDT details. */
    unsigned long shadow_ldt_mapcnt;

    struct shadow_vcpu shadow;
} __cacheline_aligned;

/* shorthands to improve code legibility */
#define hvm_vmx hvm_vcpu.u.vmx
#define hvm_svm hvm_vcpu.u.svm

#endif /* __ASM_DOMAIN_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */