direct-io.hg: xen/include/asm-x86/domain.h @ 10173:954f4dea9da6

[PAE] Allow pgdirs above 4GB for paravirt guests.
**NOTE**: This obviates the need for lowmem_emergency_pool.
Unpriv guests no longer need to be able to allocate memory
below 4GB for PAE PDPTs.
Signed-off-by: Keir Fraser <keir@xensource.com>
author   kaf24@firebug.cl.cam.ac.uk
date     Fri May 26 17:22:30 2006 +0100
parents  760f9149dbaa
children d5f98d23427a

#ifndef __ASM_DOMAIN_H__
#define __ASM_DOMAIN_H__

#include <xen/config.h>
#include <xen/mm.h>
#include <asm/hvm/vcpu.h>
#include <asm/hvm/domain.h>

struct trap_bounce {
    unsigned long  error_code;
    unsigned short flags; /* TBF_ */
    unsigned short cs;
    unsigned long  eip;
};

#define MAPHASH_ENTRIES 8
#define MAPHASH_HASHFN(pfn) ((pfn) & (MAPHASH_ENTRIES-1))
#define MAPHASHENT_NOTINUSE ((u16)~0U)
struct vcpu_maphash {
    struct vcpu_maphash_entry {
        unsigned long pfn;
        uint16_t      idx;
        uint16_t      refcnt;
    } hash[MAPHASH_ENTRIES];
} __cacheline_aligned;
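
/*
 * Illustrative sketch, not part of the original header: MAPHASH_HASHFN
 * simply masks the low bits of the PFN, so e.g. PFNs 0x1003 and 0x200b
 * both select bucket 3. Only names defined above are used; the helper
 * name itself is hypothetical.
 */
static inline struct vcpu_maphash_entry *
maphash_bucket(struct vcpu_maphash *h, unsigned long pfn)
{
    return &h->hash[MAPHASH_HASHFN(pfn)];
}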

#define MAPCACHE_ORDER   10
#define MAPCACHE_ENTRIES (1 << MAPCACHE_ORDER)
struct mapcache {
    /* The PTEs that provide the mappings, and a cursor into the array. */
    l1_pgentry_t *l1tab;
    unsigned int cursor;

    /* Protects map_domain_page(). */
    spinlock_t lock;

    /* Garbage mappings are flushed from TLBs in batches called 'epochs'. */
    unsigned int epoch, shadow_epoch[MAX_VIRT_CPUS];
    u32 tlbflush_timestamp;

    /* Which mappings are in use, and which are garbage to reap next epoch? */
    unsigned long inuse[BITS_TO_LONGS(MAPCACHE_ENTRIES)];
    unsigned long garbage[BITS_TO_LONGS(MAPCACHE_ENTRIES)];

    /* Lock-free per-VCPU hash of recently-used mappings. */
    struct vcpu_maphash vcpu_maphash[MAX_VIRT_CPUS];
};
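
/*
 * Illustrative sketch, not part of the original header: how the 'cursor'
 * and 'inuse' fields above fit together. A free mapcache slot can be found
 * by scanning the inuse bitmap from the cursor onwards; the real allocator
 * additionally recycles stale slots via the 'garbage' bitmap and the epoch
 * machinery. The helper name is hypothetical.
 */
static inline int mapcache_scan_for_free_slot(struct mapcache *cache)
{
    unsigned int idx;

    for ( idx = cache->cursor; idx < MAPCACHE_ENTRIES; idx++ )
        if ( !(cache->inuse[idx / BITS_PER_LONG] &
               (1UL << (idx % BITS_PER_LONG))) )
            return idx;

    return -1; /* Nothing free before the end; caller must reap garbage. */
}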

extern void mapcache_init(struct domain *);

/* x86/64: toggle guest between kernel and user modes. */
extern void toggle_guest_mode(struct vcpu *);

/*
 * Initialise a hypercall-transfer page. The given pointer must be mapped
 * in Xen virtual address space (accesses are not validated or checked).
 */
extern void hypercall_page_initialise(void *);
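
/*
 * Illustrative sketch, not part of the original header: the page handed to
 * hypercall_page_initialise() must already be mapped in Xen's virtual
 * address space, which a xenheap allocation satisfies. The helper name is
 * hypothetical; alloc_xenheap_page() is assumed to come from <xen/mm.h>.
 */
static inline void *example_alloc_hypercall_page(void)
{
    void *p = alloc_xenheap_page(); /* xenheap pages are always mapped */

    if ( p )
        hypercall_page_initialise(p);
    return p;
}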

struct arch_domain
{
    l1_pgentry_t *mm_perdomain_pt;
#ifdef CONFIG_X86_64
    l2_pgentry_t *mm_perdomain_l2;
    l3_pgentry_t *mm_perdomain_l3;
#endif

#ifdef CONFIG_X86_32
    /* map_domain_page() mapping cache. */
    struct mapcache mapcache;
#endif

    /* Writable pagetables. */
    struct ptwr_info ptwr[2];

    /* I/O-port admin-specified access capabilities. */
    struct rangeset *ioport_caps;

    /* Shadow mode status and controls. */
    struct shadow_ops *ops;
    unsigned int shadow_mode;  /* flags to control shadow table operation */
    unsigned int shadow_nest;  /* Recursive depth of shadow_lock() nesting */

    /* shadow hashtable */
    struct shadow_status *shadow_ht;
    struct shadow_status *shadow_ht_free;
    struct shadow_status *shadow_ht_extras; /* extra allocation units */
    unsigned int shadow_extras_count;

    /* shadow dirty bitmap */
    unsigned long *shadow_dirty_bitmap;
    unsigned int shadow_dirty_bitmap_size;  /* in pages, bit per page */

    /* shadow mode stats */
    unsigned int shadow_page_count;
    unsigned int hl2_page_count;
    unsigned int snapshot_page_count;

    unsigned int shadow_fault_count;
    unsigned int shadow_dirty_count;

    /* full shadow mode */
    struct out_of_sync_entry *out_of_sync; /* list of out-of-sync pages */
    struct out_of_sync_entry *out_of_sync_free;
    struct out_of_sync_entry *out_of_sync_extras;
    unsigned int out_of_sync_extras_count;

    struct list_head free_shadow_frames;

    pagetable_t phys_table;             /* guest 1:1 pagetable */
    struct hvm_domain hvm_domain;

    /* Shadow-translated guest: Pseudophys base address of reserved area. */
    unsigned long first_reserved_pfn;
} __cacheline_aligned;
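
/*
 * Illustrative sketch, not part of the original header: ioport_caps is a
 * generic rangeset, so an admin-granted I/O-port range would be checked
 * with the rangeset API. rangeset_contains_range() is assumed to be
 * declared in <xen/rangeset.h>; the helper name is hypothetical.
 */
static inline int example_ioports_permitted(struct arch_domain *a,
                                            unsigned int first_port,
                                            unsigned int nr_ports)
{
    return rangeset_contains_range(a->ioport_caps, first_port,
                                   first_port + nr_ports - 1);
}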

struct arch_vcpu
{
    /* Needs 16-byte alignment for FXSAVE/FXRSTOR. */
    struct vcpu_guest_context guest_context
    __attribute__((__aligned__(16)));

#ifdef CONFIG_X86_PAE
    /*
     * Two low-memory (<4GB) PAE L3 tables, used as fallback when the guest
     * supplies a >=4GB PAE L3 table. We need two because we cannot set up
     * an L3 table while we are running on it (without using expensive
     * atomic 64-bit operations); see the illustrative sketch following this
     * structure.
     */
    l3_pgentry_t  lowmem_l3tab[2][4] __attribute__((__aligned__(32)));
    unsigned long lowmem_l3tab_high_mfn[2]; /* The >=4GB MFN being shadowed. */
    unsigned int  lowmem_l3tab_inuse;       /* Which lowmem_l3tab is in use? */
#endif

    unsigned long flags; /* TF_ */

    void (*schedule_tail) (struct vcpu *);

    void (*ctxt_switch_from) (struct vcpu *);
    void (*ctxt_switch_to) (struct vcpu *);

    /* Bounce information for propagating an exception to guest OS. */
    struct trap_bounce trap_bounce;

    /* I/O-port access bitmap. */
    u8 *iobmp;        /* Guest kernel virtual address of the bitmap. */
    int iobmp_limit;  /* Number of ports represented in the bitmap.  */
    int iopl;         /* Current IOPL for this VCPU. */

#ifdef CONFIG_X86_32
    struct desc_struct int80_desc;
#endif

    /* Virtual Machine Extensions */
    struct hvm_vcpu hvm_vcpu;

    /*
     * Every domain has an L1 pagetable of its own. Per-domain mappings
     * are put in this table (e.g. the current GDT is mapped here).
     */
    l1_pgentry_t *perdomain_ptes;

    pagetable_t   guest_table_user;     /* x86/64: user-space pagetable. */
    pagetable_t   guest_table;          /* (MA) guest notion of cr3 */
    pagetable_t   shadow_table;         /* (MA) shadow of guest */
    pagetable_t   monitor_table;        /* (MA) used in hypervisor */

    l2_pgentry_t *guest_vtable;         /* virtual address of pagetable */
    l2_pgentry_t *shadow_vtable;        /* virtual address of shadow_table */
    l2_pgentry_t *monitor_vtable;       /* virtual address of monitor_table */
    l1_pgentry_t *hl2_vtable;           /* virtual address of hl2_table */

#ifdef CONFIG_X86_64
    l3_pgentry_t *guest_vl3table;
    l4_pgentry_t *guest_vl4table;
#endif

    unsigned long monitor_shadow_ref;

    /* Current LDT details. */
    unsigned long shadow_ldt_mapcnt;
} __cacheline_aligned;
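
/*
 * Illustrative sketch, not part of the original header: how the two
 * lowmem_l3tab copies in arch_vcpu are expected to alternate. When the
 * guest installs a PAE top-level table located at or above 4GB, its
 * entries are copied into whichever low-memory copy is *not* currently
 * live, and lowmem_l3tab_inuse is then flipped, so the L3 table the CPU
 * is running on is never rewritten in place. The helper name is
 * hypothetical.
 */
#ifdef CONFIG_X86_PAE
static inline l3_pgentry_t *lowmem_l3tab_idle_copy(struct arch_vcpu *v)
{
    return v->lowmem_l3tab[!v->lowmem_l3tab_inuse];
}
#endif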

/* shorthands to improve code legibility */
#define hvm_vmx hvm_vcpu.u.vmx
#define hvm_svm hvm_vcpu.u.svm
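/* E.g. an access written as v->arch.hvm_vmx expands to v->arch.hvm_vcpu.u.vmx. */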

#endif /* __ASM_DOMAIN_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */