direct-io.hg

view xen/include/asm-x86/domain.h @ 12803:df5fa63490f4

[XEN] Implement XENMEM_set_memory_map, which specifies the memory map to
be returned by XENMEM_memory_map. Hook this into the domain builder.

Based on a patch by Glauber de Oliveira Costa <gcosta@redhat.com>

Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Fri Dec 08 11:30:30 2006 +0000 (2006-12-08)
parents 963a02c040f6
children f632c0c36976
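
The map set by XENMEM_set_memory_map is stored per-domain in arch_domain
(the e820[]/nr_e820 fields below) and handed back verbatim by
XENMEM_memory_map. Here is a minimal sketch of what the returning side
might look like, assuming the guest-handle and copy_from_guest()/
copy_to_guest() interfaces of this era; the real handler is part of
arch_memory_op():

static long memory_map_sketch(XEN_GUEST_HANDLE(xen_memory_map_t) arg)
{
    struct xen_memory_map map;
    struct domain *d = current->domain;

    if ( d->arch.nr_e820 == 0 )
        return -ENOSYS; /* no map was ever set for this domain */

    if ( copy_from_guest(&map, arg, 1) )
        return -EFAULT;

    /* Never copy out more entries than were stored (at most 3 here). */
    map.nr_entries = min(map.nr_entries, d->arch.nr_e820);

    if ( copy_to_guest(map.buffer, d->arch.e820, map.nr_entries) ||
         copy_to_guest(arg, &map, 1) ) /* write back the actual count */
        return -EFAULT;

    return 0;
}
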
line source
#ifndef __ASM_DOMAIN_H__
#define __ASM_DOMAIN_H__

#include <xen/config.h>
#include <xen/mm.h>
#include <asm/hvm/vcpu.h>
#include <asm/hvm/domain.h>
#include <asm/e820.h>

struct trap_bounce {
    unsigned long error_code;
    unsigned short flags; /* TBF_ */
    unsigned short cs;
    unsigned long eip;
};

#define MAPHASH_ENTRIES 8
#define MAPHASH_HASHFN(pfn) ((pfn) & (MAPHASH_ENTRIES-1))
#define MAPHASHENT_NOTINUSE ((u16)~0U)
struct vcpu_maphash {
    struct vcpu_maphash_entry {
        unsigned long mfn;
        uint16_t idx;
        uint16_t refcnt;
    } hash[MAPHASH_ENTRIES];
} __cacheline_aligned;
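
/*
 * Illustrative sketch: a lookup in the lock-free per-VCPU hash probes a
 * single slot and bumps the refcount on a hit, falling back to the locked
 * mapcache on a miss. The function name and the MAPCACHE_VIRT_START-based
 * address computation are assumptions; the real logic lives in the
 * x86_32 domain_page.c implementation.
 */
static inline void *vcpu_maphash_lookup(struct vcpu_maphash *h,
                                        unsigned long mfn)
{
    struct vcpu_maphash_entry *ent = &h->hash[MAPHASH_HASHFN(mfn)];

    if ( (ent->idx != MAPHASHENT_NOTINUSE) && (ent->mfn == mfn) )
    {
        ent->refcnt++; /* hash is per-VCPU, so no atomics are needed */
        return (void *)(MAPCACHE_VIRT_START +
                        ((unsigned long)ent->idx << PAGE_SHIFT));
    }

    return NULL; /* miss: caller takes the mapcache lock instead */
}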

#define MAPCACHE_ORDER 10
#define MAPCACHE_ENTRIES (1 << MAPCACHE_ORDER)
struct mapcache {
    /* The PTEs that provide the mappings, and a cursor into the array. */
    l1_pgentry_t *l1tab;
    unsigned int cursor;

    /* Protects map_domain_page(). */
    spinlock_t lock;

    /* Garbage mappings are flushed from TLBs in batches called 'epochs'. */
    unsigned int epoch, shadow_epoch[MAX_VIRT_CPUS];
    u32 tlbflush_timestamp;

    /* Which mappings are in use, and which are garbage to reap next epoch? */
    unsigned long inuse[BITS_TO_LONGS(MAPCACHE_ENTRIES)];
    unsigned long garbage[BITS_TO_LONGS(MAPCACHE_ENTRIES)];

    /* Lock-free per-VCPU hash of recently-used mappings. */
    struct vcpu_maphash vcpu_maphash[MAX_VIRT_CPUS];
};
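
/*
 * Illustrative sketch: retiring a mapping zaps its PTE and marks the slot
 * as garbage; the TLB flush is deferred until the next epoch, when stale
 * entries are reaped in a batch. The function name is hypothetical; the
 * real code lives in the x86_32 domain_page.c implementation.
 */
static inline void mapcache_retire_entry(struct mapcache *cache,
                                         unsigned int idx)
{
    /* First, zap the PTE so the mapping can no longer be used... */
    cache->l1tab[idx] = l1e_empty();
    /* ...then mark it garbage, to be flushed when the epoch advances. */
    set_bit(idx, cache->garbage);
}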

extern void mapcache_init(struct domain *);

/* x86/64: toggle guest between kernel and user modes. */
extern void toggle_guest_mode(struct vcpu *);

/*
 * Initialise a hypercall-transfer page. The given pointer must be mapped
 * in Xen virtual address space (accesses are not validated or checked).
 */
extern void hypercall_page_initialise(struct domain *d, void *);
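
/*
 * Illustrative usage sketch: a caller satisfies the "mapped in Xen virtual
 * address space" requirement by mapping the frame first, e.g. with
 * map_domain_page() (declared in xen/domain_page.h). The wrapper name and
 * the mfn parameter are hypothetical.
 */
static inline void init_hypercall_page(struct domain *d, unsigned long mfn)
{
    void *p = map_domain_page(mfn); /* now valid in Xen VA space */

    hypercall_page_initialise(d, p);
    unmap_domain_page(p);
}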

struct shadow_domain {
    u32 mode;                    /* flags to control shadow operation */
    spinlock_t lock;             /* shadow domain lock */
    int locker;                  /* processor which holds the lock */
    const char *locker_function; /* Func that took it */
    struct list_head freelists[SHADOW_MAX_ORDER + 1];
    struct list_head p2m_freelist;
    struct list_head p2m_inuse;
    struct list_head pinned_shadows;
    unsigned int total_pages;    /* number of pages allocated */
    unsigned int free_pages;     /* number of pages on freelists */
    unsigned int p2m_pages;      /* number of pages in p2m map */
    unsigned int opt_flags;      /* runtime tunable optimizations on/off */

    /* Shadow hashtable */
    struct shadow_page_info **hash_table;
    int hash_walking;            /* Some function is walking the hash table */

    /* Shadow log-dirty bitmap */
    unsigned long *dirty_bitmap;
    unsigned int dirty_bitmap_size; /* in pages, bit per page */

    /* Shadow log-dirty mode stats */
    unsigned int fault_count;
    unsigned int dirty_count;
};
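
/*
 * Illustrative sketch: log-dirty mode tracks one bit per guest page, so
 * marking a frame dirty is a test-and-set in the bitmap plus a statistics
 * bump. The function name is hypothetical; the real marking logic lives
 * in the shadow common code.
 */
static inline void shadow_mark_dirty_sketch(struct shadow_domain *sd,
                                            unsigned long pfn)
{
    /* dirty_bitmap_size counts guest pages, i.e. bits in the bitmap. */
    if ( (sd->dirty_bitmap != NULL) && (pfn < sd->dirty_bitmap_size) )
    {
        if ( !test_and_set_bit(pfn, sd->dirty_bitmap) )
            sd->dirty_count++; /* first write to this page this round */
    }
}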

struct arch_domain
{
    l1_pgentry_t *mm_perdomain_pt;
#ifdef CONFIG_X86_64
    l2_pgentry_t *mm_perdomain_l2;
    l3_pgentry_t *mm_perdomain_l3;
#endif

#ifdef CONFIG_X86_32
    /* map_domain_page() mapping cache. */
    struct mapcache mapcache;
#endif

    /* I/O-port admin-specified access capabilities. */
    struct rangeset *ioport_caps;

    struct hvm_domain hvm_domain;

    struct shadow_domain shadow;

    /* Shadow translated domain: P2M mapping */
    pagetable_t phys_table;
    /* Highest guest frame that's ever been mapped in the p2m */
    unsigned long max_mapped_pfn;

    /* Pseudophysical e820 map (XENMEM_memory_map). */
    struct e820entry e820[3];
    unsigned int nr_e820;
} __cacheline_aligned;

#ifdef CONFIG_X86_PAE
struct pae_l3_cache {
    /*
     * Two low-memory (<4GB) PAE L3 tables, used as fallback when the guest
     * supplies a >=4GB PAE L3 table. We need two because we cannot set up
     * an L3 table while we are currently running on it (without using
     * expensive atomic 64-bit operations).
     */
    l3_pgentry_t table[2][4] __attribute__((__aligned__(32)));
    unsigned long high_mfn;  /* The >=4GB MFN being shadowed. */
    unsigned int inuse_idx;  /* Which of the two cache slots is in use? */
    spinlock_t lock;
};
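
/*
 * Illustrative sketch: a refill builds the new L3 in the slot that is
 * *not* currently live, then flips inuse_idx, so the table the CPU is
 * running on is never modified in place. The function name and the copy
 * source are assumptions; the real fill happens on the CR3-load path.
 */
static inline l3_pgentry_t *pae_l3_cache_refill(struct pae_l3_cache *cache,
                                                const l3_pgentry_t *guest_l3)
{
    unsigned int idx;

    spin_lock(&cache->lock);
    idx = cache->inuse_idx ^ 1;           /* build in the idle slot */
    memcpy(cache->table[idx], guest_l3, 4 * sizeof(l3_pgentry_t));
    cache->inuse_idx = idx;               /* flip the live slot */
    spin_unlock(&cache->lock);

    return cache->table[idx];
}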

#define pae_l3_cache_init(c) spin_lock_init(&(c)->lock)
#else /* !CONFIG_X86_PAE */
struct pae_l3_cache { };
#define pae_l3_cache_init(c) ((void)0)
#endif

struct shadow_vcpu {
#if CONFIG_PAGING_LEVELS >= 3
    /* PAE guests: per-vcpu shadow top-level table */
    l3_pgentry_t l3table[4] __attribute__((__aligned__(32)));
#endif
    /* Pointers to mode-specific entry points. */
    struct shadow_paging_mode *mode;
    /* Last MFN that we emulated a write to. */
    unsigned long last_emulated_mfn;
    /* MFN of the last shadow that we shot a writeable mapping in. */
    unsigned long last_writeable_pte_smfn;
    /* HVM guest: paging enabled (CR0.PG)? */
    unsigned int translate_enabled:1;
};

struct arch_vcpu
{
    /* Needs 16-byte alignment for FXSAVE/FXRSTOR. */
    struct vcpu_guest_context guest_context
    __attribute__((__aligned__(16)));

    struct pae_l3_cache pae_l3_cache;

    unsigned long flags; /* TF_ */

    void (*schedule_tail) (struct vcpu *);

    void (*ctxt_switch_from) (struct vcpu *);
    void (*ctxt_switch_to) (struct vcpu *);

    /* Bounce information for propagating an exception to guest OS. */
    struct trap_bounce trap_bounce;

    /* I/O-port access bitmap. */
    XEN_GUEST_HANDLE(uint8_t) iobmp; /* Guest kernel virtual address of the bitmap. */
    int iobmp_limit; /* Number of ports represented in the bitmap. */
    int iopl;        /* Current IOPL for this VCPU. */

#ifdef CONFIG_X86_32
    struct desc_struct int80_desc;
#endif

    /* Virtual Machine Extensions */
    struct hvm_vcpu hvm_vcpu;

    /*
     * Every domain has an L1 pagetable of its own. Per-domain mappings
     * are put in this table (eg. the current GDT is mapped here).
     */
    l1_pgentry_t *perdomain_ptes;

#ifdef CONFIG_X86_64
    pagetable_t guest_table_user; /* (MFN) x86/64 user-space pagetable */
#endif
    pagetable_t guest_table;      /* (MFN) guest notion of cr3 */
    /* guest_table holds a ref to the page, and also a type-count unless
     * shadow refcounts are in use (see the sketch after this struct). */
    pagetable_t shadow_table[4];  /* (MFN) shadow(s) of guest */
    pagetable_t monitor_table;    /* (MFN) hypervisor PT (for HVM) */
    unsigned long cr3;            /* (MA) value to install in HW CR3 */

    void *guest_vtable;           /* virtual addr of pagetable */

    /* Current LDT details. */
    unsigned long shadow_ldt_mapcnt;

    struct shadow_vcpu shadow;
} __cacheline_aligned;

/* shorthands to improve code legibility */
#define hvm_vmx hvm_vcpu.u.vmx
#define hvm_svm hvm_vcpu.u.svm

#endif /* __ASM_DOMAIN_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */