view xen/include/asm-x86/domain.h @ 10892:0d2ba35c0cf2 (ia64/xen-unstable)

[XEN] Add hypercall support for HVM guests. This is
fairly useless at the moment: all of the hypercalls
fail, because copy_from_user doesn't work correctly
in HVM domains.

Signed-off-by: Steven Smith <ssmith@xensource.com>

Add a CPUID hypervisor platform interface at leaf
0x40000000. Allow the hypercall transfer page to be
filled in via MSR 0x40000000.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Tue Aug 01 17:18:05 2006 +0100 (2006-08-01)
parents d5f98d23427a
children 9727328c008e
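
As context for this changeset, the sketch below shows how an HVM guest might
probe the new CPUID interface and nominate a hypercall transfer page. The exact
sub-leaf layout (the "XenVMMXenVMM" signature returned in EBX/ECX/EDX, and leaf
0x40000002 reporting the page count in EAX and the MSR index in EBX) and the
guest_virt_to_phys() helper are assumptions for illustration, not part of this
header.

#include <stdint.h>
#include <string.h>

static void cpuid(uint32_t leaf,
                  uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
    asm volatile ( "cpuid"
                   : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
                   : "0" (leaf) );
}

static void wrmsr(uint32_t msr, uint64_t val)
{
    asm volatile ( "wrmsr"
                   :: "c" (msr), "a" ((uint32_t)val),
                      "d" ((uint32_t)(val >> 32)) );
}

extern char hypercall_page[4096];            /* page-aligned guest page */
extern uint64_t guest_virt_to_phys(void *);  /* hypothetical helper */

static int xen_setup_hypercall_page(void)
{
    uint32_t eax, ebx, ecx, edx;
    char sig[12];

    cpuid(0x40000000, &eax, &ebx, &ecx, &edx);
    memcpy(sig + 0, &ebx, 4);
    memcpy(sig + 4, &ecx, 4);
    memcpy(sig + 8, &edx, 4);
    if ( memcmp(sig, "XenVMMXenVMM", 12) || (eax < 0x40000002) )
        return -1;                           /* not running on Xen */

    /* Assumed layout: EAX = number of hypercall pages, EBX = MSR index. */
    cpuid(0x40000002, &eax, &ebx, &ecx, &edx);

    /*
     * Writing the page's guest physical address to the MSR (0x40000000,
     * per the description above) asks Xen to fill it with hypercall stubs.
     */
    wrmsr(ebx, guest_virt_to_phys(hypercall_page));
    return 0;
}
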
line source
#ifndef __ASM_DOMAIN_H__
#define __ASM_DOMAIN_H__

#include <xen/config.h>
#include <xen/mm.h>
#include <asm/hvm/vcpu.h>
#include <asm/hvm/domain.h>

/* State needed to reflect an exception or interrupt into the guest OS. */
struct trap_bounce {
    unsigned long  error_code;  /* error code to push, if any */
    unsigned short flags;       /* TBF_ */
    unsigned short cs;          /* guest handler code segment */
    unsigned long  eip;         /* guest handler entry point */
};

#define MAPHASH_ENTRIES 8
#define MAPHASH_HASHFN(pfn) ((pfn) & (MAPHASH_ENTRIES-1))
#define MAPHASHENT_NOTINUSE ((u16)~0U)
struct vcpu_maphash {
    struct vcpu_maphash_entry {
        unsigned long pfn;
        uint16_t      idx;
        uint16_t      refcnt;
    } hash[MAPHASH_ENTRIES];
} __cacheline_aligned;

#define MAPCACHE_ORDER   10
#define MAPCACHE_ENTRIES (1 << MAPCACHE_ORDER)
struct mapcache {
    /* The PTEs that provide the mappings, and a cursor into the array. */
    l1_pgentry_t *l1tab;
    unsigned int cursor;

    /* Protects map_domain_page(). */
    spinlock_t lock;

    /* Garbage mappings are flushed from TLBs in batches called 'epochs'. */
    unsigned int epoch, shadow_epoch[MAX_VIRT_CPUS];
    u32 tlbflush_timestamp;

    /* Which mappings are in use, and which are garbage to reap next epoch? */
    unsigned long inuse[BITS_TO_LONGS(MAPCACHE_ENTRIES)];
    unsigned long garbage[BITS_TO_LONGS(MAPCACHE_ENTRIES)];

    /* Lock-free per-VCPU hash of recently-used mappings. */
    struct vcpu_maphash vcpu_maphash[MAX_VIRT_CPUS];
};
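
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * the lock-free fast path map_domain_page() can take through the
 * per-VCPU hash before falling back to the locked cursor/epoch scan.
 * mapcache_entry_to_virt() is a hypothetical helper that turns a
 * mapcache slot index into its mapped virtual address.
 */
static inline void *maphash_try_fast_path(struct mapcache *cache,
                                          unsigned int vcpu,
                                          unsigned long pfn)
{
    struct vcpu_maphash_entry *ent =
        &cache->vcpu_maphash[vcpu].hash[MAPHASH_HASHFN(pfn)];

    if ( (ent->idx == MAPHASHENT_NOTINUSE) || (ent->pfn != pfn) )
        return NULL;  /* miss: caller takes the locked slow path */

    ent->refcnt++;    /* no lock needed: the hash is private to this VCPU */
    return mapcache_entry_to_virt(ent->idx);
}
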
extern void mapcache_init(struct domain *);

/* x86/64: toggle guest between kernel and user modes. */
extern void toggle_guest_mode(struct vcpu *);

/*
 * Initialise a hypercall-transfer page. The given pointer must be mapped
 * in Xen virtual address space (accesses are not validated or checked).
 */
extern void hypercall_page_initialise(struct domain *d, void *);
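
/*
 * Note (per this changeset's description): for an HVM guest the page
 * handed to this function is nominated by the guest writing its address
 * to MSR 0x40000000, after discovering the interface via CPUID leaf
 * 0x40000000.
 */
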
struct arch_domain
{
    l1_pgentry_t *mm_perdomain_pt;
#ifdef CONFIG_X86_64
    l2_pgentry_t *mm_perdomain_l2;
    l3_pgentry_t *mm_perdomain_l3;
#endif

#ifdef CONFIG_X86_32
    /* map_domain_page() mapping cache. */
    struct mapcache mapcache;
#endif

    /* Writable pagetables. */
    struct ptwr_info ptwr[2];

    /* I/O-port admin-specified access capabilities. */
    struct rangeset *ioport_caps;

    /* Shadow mode status and controls. */
    struct shadow_ops *ops;
    unsigned int shadow_mode;  /* flags to control shadow table operation */
    unsigned int shadow_nest;  /* Recursive depth of shadow_lock() nesting */

    /* shadow hashtable */
    struct shadow_status *shadow_ht;
    struct shadow_status *shadow_ht_free;
    struct shadow_status *shadow_ht_extras; /* extra allocation units */
    unsigned int shadow_extras_count;

    /* shadow dirty bitmap */
    unsigned long *shadow_dirty_bitmap;
    unsigned int shadow_dirty_bitmap_size;  /* in pages, bit per page */

    /* shadow mode stats */
    unsigned int shadow_page_count;
    unsigned int hl2_page_count;
    unsigned int snapshot_page_count;

    unsigned int shadow_fault_count;
    unsigned int shadow_dirty_count;

    /* full shadow mode */
    struct out_of_sync_entry *out_of_sync; /* list of out-of-sync pages */
    struct out_of_sync_entry *out_of_sync_free;
    struct out_of_sync_entry *out_of_sync_extras;
    unsigned int out_of_sync_extras_count;

    struct list_head free_shadow_frames;

    pagetable_t phys_table;    /* guest 1:1 pagetable */
    struct hvm_domain hvm_domain;

    /* Shadow-translated guest: Pseudophys base address of reserved area. */
    unsigned long first_reserved_pfn;
} __cacheline_aligned;

#ifdef CONFIG_X86_PAE
struct pae_l3_cache {
    /*
     * Two low-memory (<4GB) PAE L3 tables, used as fallback when the guest
     * supplies a >=4GB PAE L3 table. We need two because we cannot set up
     * an L3 table while we are currently running on it (without using
     * expensive atomic 64-bit operations).
     */
    l3_pgentry_t  table[2][4] __attribute__((__aligned__(32)));
    unsigned long high_mfn;  /* The >=4GB MFN being shadowed. */
    unsigned int  inuse_idx; /* Which of the two cache slots is in use? */
    spinlock_t    lock;
};
#define pae_l3_cache_init(c) spin_lock_init(&(c)->lock)
#else /* !CONFIG_X86_PAE */
struct pae_l3_cache { };
#define pae_l3_cache_init(c) ((void)0)
#endif
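
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * refilling the fallback cache when the guest installs a >=4GB L3 table.
 * The idle slot is populated while the other slot may still be live, and
 * only then does inuse_idx flip; copy_highmem_l3_entries() is a
 * hypothetical helper that copies the four L3 entries from the high MFN.
 */
static inline void pae_l3_cache_refill(struct pae_l3_cache *cache,
                                       unsigned long gl3mfn)
{
    unsigned int idle = !cache->inuse_idx;

    spin_lock(&cache->lock);
    copy_highmem_l3_entries(cache->table[idle], gl3mfn);
    cache->high_mfn  = gl3mfn;
    cache->inuse_idx = idle;  /* switch slots only after the copy is done */
    spin_unlock(&cache->lock);
}
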
struct arch_vcpu
{
    /* Needs 16-byte alignment for FXSAVE/FXRSTOR. */
    struct vcpu_guest_context guest_context
        __attribute__((__aligned__(16)));

    struct pae_l3_cache pae_l3_cache;

    unsigned long flags; /* TF_ */

    void (*schedule_tail) (struct vcpu *);

    void (*ctxt_switch_from) (struct vcpu *);
    void (*ctxt_switch_to) (struct vcpu *);

    /* Bounce information for propagating an exception to guest OS. */
    struct trap_bounce trap_bounce;

    /* I/O-port access bitmap. */
    u8 *iobmp;        /* Guest kernel virtual address of the bitmap. */
    int iobmp_limit;  /* Number of ports represented in the bitmap.  */
    int iopl;         /* Current IOPL for this VCPU.                 */

#ifdef CONFIG_X86_32
    struct desc_struct int80_desc;
#endif

    /* Virtual Machine Extensions */
    struct hvm_vcpu hvm_vcpu;

    /*
     * Every domain has an L1 pagetable of its own. Per-domain mappings
     * are put in this table (eg. the current GDT is mapped here).
     */
    l1_pgentry_t *perdomain_ptes;

    pagetable_t  guest_table_user;  /* x86/64: user-space pagetable. */
    pagetable_t  guest_table;       /* (MA) guest notion of cr3 */
    pagetable_t  shadow_table;      /* (MA) shadow of guest */
    pagetable_t  monitor_table;     /* (MA) used in hypervisor */

    l2_pgentry_t *guest_vtable;     /* virtual address of pagetable */
    l2_pgentry_t *shadow_vtable;    /* virtual address of shadow_table */
    l2_pgentry_t *monitor_vtable;   /* virtual address of monitor_table */
    l1_pgentry_t *hl2_vtable;       /* virtual address of hl2_table */

#ifdef CONFIG_X86_64
    l3_pgentry_t *guest_vl3table;
    l4_pgentry_t *guest_vl4table;
#endif

    unsigned long monitor_shadow_ref;

    /* Current LDT details. */
    unsigned long shadow_ldt_mapcnt;
} __cacheline_aligned;

/* shorthands to improve code legibility */
#define hvm_vmx hvm_vcpu.u.vmx
#define hvm_svm hvm_vcpu.u.svm
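/* For example, v->arch.hvm_vmx.<field> expands to v->arch.hvm_vcpu.u.vmx.<field>. */
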
#endif /* __ASM_DOMAIN_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */