ia64/xen-unstable

view xen/include/asm-ia64/domain.h @ 16784:6a7fa7dbde56

[IA64] domheap: Fix current->domain->arch.shadow_bitmap reference

Don't reference current->domain->arch.shadow_bitmap in dirty_bit fault handler.
Instead copy it to arch_vcpu.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Thu Jan 17 12:05:43 2008 -0700 (2008-01-17)
parents 4f1f9ee50133
children 430a036ab261
line source
1 #ifndef __ASM_DOMAIN_H__
2 #define __ASM_DOMAIN_H__
4 #include <linux/thread_info.h>
5 #include <asm/tlb.h>
6 #include <asm/vmx_vpd.h>
7 #include <asm/vmmu.h>
8 #include <asm/regionreg.h>
9 #include <public/xen.h>
10 #include <asm/vmx_platform.h>
11 #include <xen/list.h>
12 #include <xen/cpumask.h>
13 #include <asm/fpswa.h>
14 #include <xen/rangeset.h>
16 struct p2m_entry;
17 #ifdef CONFIG_XEN_IA64_TLB_TRACK
18 struct tlb_track;
19 #endif
21 struct vcpu;
22 extern void relinquish_vcpu_resources(struct vcpu *v);
23 extern int vcpu_late_initialise(struct vcpu *v);
25 /* given a current domain metaphysical address, return the physical address */
26 extern unsigned long translate_domain_mpaddr(unsigned long mpaddr,
27 struct p2m_entry* entry);
29 /* Set shared_info virtual address. */
30 extern unsigned long domain_set_shared_info_va (unsigned long va);
32 /* Flush cache of domain d.
33 If sync_only is true, only synchronize I&D caches,
34 if false, flush and invalidate caches. */
35 extern void domain_cache_flush (struct domain *d, int sync_only);
37 /* Control the shadow mode. */
38 extern int shadow_mode_control(struct domain *d, xen_domctl_shadow_op_t *sc);
40 /* Cleanly crash the current domain with a message. */
41 extern void panic_domain(struct pt_regs *, const char *, ...)
42 __attribute__ ((noreturn, format (printf, 2, 3)));
/* Minimal per-domain memory-management context: only the page-table
 * root is tracked (user-space accounting is commented out as unneeded). */
44 struct mm_struct {
/* Root page-table directory.  NOTE(review): declared volatile,
 * presumably because it may be updated concurrently with readers
 * that don't take a lock -- confirm against the users of mm.pgd. */
45 volatile pgd_t * pgd;
46 // atomic_t mm_users; /* How many users with user space? */
47 };
/* Bookkeeping for mappings of foreign domains' p2m tables into this
 * domain (used for the arch_domain.foreign_p2m member below). */
49 struct foreign_p2m {
/* Protects the 'head' list below. */
50 spinlock_t lock;
51 /*
52 * List kept sorted by entry->gpfn.
53 * It is expected that only a small number of foreign domain p2m
54 * mappings exist at the same time, so a simple list suffices.
55 */
56 struct list_head head;
57 };
/* Record of the last vcpu that ran on a physical CPU; used as a
 * per-pCPU array (see 'struct last_vcpu last_vcpu[NR_CPUS]' in
 * struct arch_domain).  Cacheline-aligned so that entries for
 * different physical CPUs do not share a cache line. */
59 struct last_vcpu {
60 #define INVALID_VCPU_ID INT_MAX
/* vcpu id of the last vcpu run, or INVALID_VCPU_ID if none. */
61 int vcpu_id;
62 #ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
/* NOTE(review): presumably snapshots the tlbflush clock at the time
 * this vcpu last ran -- confirm against the tlbflush-clock users. */
63 u32 tlbflush_timestamp;
64 #endif
65 } ____cacheline_aligned_in_smp;
67 /* These are data in domain memory for SAL emulator. */
68 struct xen_sal_data {
/* OS boot rendezvous: entry IP and r1 value for secondary processors. */
70 unsigned long boot_rdv_ip;
71 unsigned long boot_rdv_r1;
73 /* These are for EFI_SET_VIRTUAL_ADDRESS_MAP emulation. */
74 int efi_virt_mode; /* phys : 0 , virt : 1 */
75 };
77 /*
78 * Optimization features are used by the hypervisor to do some optimizations
79 * for guests. By default the optimizations are switched off and the guest
80 * may activate the feature. The guest may do this via the hypercall
81 * __HYPERVISOR_opt_feature. Domain builder code can also enable these
82 * via XEN_DOMCTL_set_opt_feature.
83 */
85 /*
86 * Helper struct for the different identity mapping optimizations.
87 * The hypervisor does the insertion of address translations in the tlb
88 * for identity mapped areas without reflecting the page fault
89 * to the guest.
90 */
/* Per-region parameters for one identity-mapping optimization
 * (one instance per optimized region in struct opt_feature). */
91 struct identity_mapping {
92 unsigned long pgprot; /* The page protection bit mask of the pte.*/
93 unsigned long key; /* A protection key. */
94 };
96 /* opt_feature mask -- bit positions and masks for opt_feature.mask. */
97 /*
98 * If this feature is switched on, the hypervisor inserts the
99 * tlb entries without calling the guest's trap handler.
100 * This is useful in guests using region 7 for identity mapping
101 * like the linux kernel does.
102 */
103 #define XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT 0
104 #define XEN_IA64_OPTF_IDENT_MAP_REG7_FLG \
105 (1UL << XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT)
107 /* Identity mapping of region 4 addresses in HVM. */
108 #define XEN_IA64_OPTF_IDENT_MAP_REG4_FLG_BIT \
109 (XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT + 1)
110 #define XEN_IA64_OPTF_IDENT_MAP_REG4_FLG \
111 (1UL << XEN_IA64_OPTF_IDENT_MAP_REG4_FLG_BIT)
113 /* Identity mapping of region 5 addresses in HVM. */
114 #define XEN_IA64_OPTF_IDENT_MAP_REG5_FLG_BIT \
115 (XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT + 2)
116 #define XEN_IA64_OPTF_IDENT_MAP_REG5_FLG \
117 (1UL << XEN_IA64_OPTF_IDENT_MAP_REG5_FLG_BIT)
119 /* Central structure for optimization features used by the hypervisor. */
120 struct opt_feature {
/* One bit per feature; bit meanings are the XEN_IA64_OPTF_*_FLG masks above. */
121 unsigned long mask; /* For every feature one bit. */
122 struct identity_mapping im_reg4; /* Region 4 identity mapping */
123 struct identity_mapping im_reg5; /* Region 5 identity mapping */
124 struct identity_mapping im_reg7; /* Region 7 identity mapping */
125 };
127 /* Set an optimization feature in the struct arch_domain. */
128 extern int domain_opt_feature(struct domain *, struct xen_ia64_opt_feature*);
/* IA64-specific per-domain state. */
130 struct arch_domain {
131 struct mm_struct mm;
133 /* Flags.  The union lets assembly/C test all flags at once via
134 'flags' while C code uses the named bitfields. */
134 union {
135 unsigned long flags;
136 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
137 struct {
138 unsigned int has_pervcpu_vhpt : 1;
139 unsigned int vhpt_size_log2 : 6;
140 };
141 #endif
142 };
144 /* maximum metaphysical address of conventional memory */
145 u64 convmem_end;
147 /* Allowed accesses to io ports. */
148 struct rangeset *ioport_caps;
150 /* There are two ranges of RID for a domain:
151 one big range, used to virtualize domain RID,
152 one small range for internal Xen use (metaphysical). */
153 /* Big range. */
154 unsigned int starting_rid; /* first RID assigned to domain */
155 unsigned int ending_rid; /* one beyond highest RID assigned to domain */
156 /* Metaphysical range. */
157 unsigned int starting_mp_rid;
158 unsigned int ending_mp_rid;
159 /* RID for metaphysical mode. */
160 unsigned int metaphysical_rid_dt; /* dt=it=0 */
161 unsigned int metaphysical_rid_d; /* dt=0, it=1 */
163 unsigned char rid_bits; /* number of virtual rid bits (default: 18) */
164 int breakimm; /* The imm value for hypercalls. */
166 struct virtual_platform_def vmx_platform;
167 #define hvm_domain vmx_platform /* platform defs are not vmx specific */
/* Guest-set virtual address of shared_info (see domain_set_shared_info_va). */
169 u64 shared_info_va;
171 /* Address of SAL emulator data */
172 struct xen_sal_data *sal_data;
174 /* Address of efi_runtime_services_t (placed in domain memory) */
175 void *efi_runtime;
176 /* Address of fpswa_interface_t (placed in domain memory) */
177 void *fpswa_inf;
179 /* Bitmap of shadow dirty bits.
180 Set iff shadow mode is enabled.
A copy of this pointer is kept in each arch_vcpu so the dirty_bit
fault handler need not dereference current->domain (see the
arch_vcpu.shadow_bitmap field and this changeset's description). */
181 u64 *shadow_bitmap;
182 /* Length (in bits!) of shadow bitmap. */
183 unsigned long shadow_bitmap_size;
184 /* Number of bits set in bitmap. */
185 atomic64_t shadow_dirty_count;
186 /* Number of faults. */
187 atomic64_t shadow_fault_count;
189 /* for foreign domain p2m table mapping */
190 struct foreign_p2m foreign_p2m;
/* Per physical CPU: which of this domain's vcpus ran there last. */
192 struct last_vcpu last_vcpu[NR_CPUS];
/* Optimization features enabled for this domain (see struct opt_feature). */
194 struct opt_feature opt_feature;
196 /* Debugging flags. See arch-ia64.h for bits definition. */
197 unsigned int debug_flags;
199 /* Reason of debugging break. */
200 unsigned int debug_event;
202 #ifdef CONFIG_XEN_IA64_TLB_TRACK
203 struct tlb_track* tlb_track;
204 #endif
206 /* for domctl_destroy_domain continuation */
207 enum {
208 RELRES_not_started,
209 RELRES_mm_teardown,
210 RELRES_xen,
211 RELRES_dom,
212 RELRES_done,
213 } relres;
214 /* Continuable mm_teardown() */
215 unsigned long mm_teardown_offset;
216 /* Continuable domain_relinquish_resources() */
217 struct list_head relmem_list;
218 };
/* Byte offset, within the shared_info vcpu_info array, of vcpu v's
 * event-channel upcall mask. */
219 #define INT_ENABLE_OFFSET(v) \
220 (sizeof(vcpu_info_t) * (v)->vcpu_id + \
221 offsetof(vcpu_info_t, evtchn_upcall_mask))
/* Whether domain d uses a per-vcpu VHPT; constant 0 when the
 * feature is compiled out. */
223 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
224 #define HAS_PERVCPU_VHPT(d) ((d)->arch.has_pervcpu_vhpt)
225 #else
226 #define HAS_PERVCPU_VHPT(d) (0)
227 #endif
/* IA64-specific per-vcpu state.  Several fields mirror arch_domain
 * values so fault handlers and assembly code can reach them through
 * the vcpu alone (without dereferencing current->domain). */
230 struct arch_vcpu {
231 /* Save the state of vcpu.
232 This is the first entry to speed up accesses. */
233 mapped_regs_t *privregs;
235 /* TR and TC. */
236 TR_ENTRY itrs[NITRS];
237 TR_ENTRY dtrs[NDTRS];
238 TR_ENTRY itlb;
239 TR_ENTRY dtlb;
241 /* Bit is set if there is a tr/tc for the region. */
242 unsigned char itr_regions;
243 unsigned char dtr_regions;
244 unsigned char tc_regions;
246 unsigned long irr[4]; /* Interrupt request register. */
247 unsigned long insvc[4]; /* Interrupt in service. */
248 unsigned long iva;
249 unsigned long domain_itm;
250 unsigned long domain_itm_last;
252 unsigned long event_callback_ip; // event callback handler
253 unsigned long failsafe_callback_ip; // Do we need it?
255 /* These fields are copied from arch_domain to make access easier/faster
256 in assembly code. */
257 unsigned long metaphysical_rid_dt; // from arch_domain (so is pinned)
258 unsigned long metaphysical_rid_d; // from arch_domain (so is pinned)
259 unsigned long metaphysical_saved_rr0; // from arch_domain (so is pinned)
260 unsigned long metaphysical_saved_rr4; // from arch_domain (so is pinned)
261 unsigned long fp_psr; // used for lazy float register
/* Copy of arch_domain.shadow_bitmap so the dirty_bit fault handler
 * avoids current->domain->arch.shadow_bitmap (per this changeset). */
262 u64 *shadow_bitmap; // from arch_domain (so is pinned)
263 int breakimm; // from arch_domain (so is pinned)
264 int starting_rid; /* first RID assigned to domain */
265 int ending_rid; /* one beyond highest RID assigned to domain */
266 unsigned char rid_bits; // from arch_domain (so is pinned)
268 /* Bitset for debug register use. */
269 unsigned int dbg_used;
270 u64 dbr[IA64_NUM_DBG_REGS];
271 u64 ibr[IA64_NUM_DBG_REGS];
273 struct thread_struct _thread; // this must be last
275 thash_cb_t vtlb;
276 thash_cb_t vhpt;
277 char irq_new_pending;
278 char irq_new_condition; // vpsr.i/vtpr change, check for pending VHPI
279 char hypercall_continuation;
281 fpswa_ret_t fpswa_ret; /* save return values of FPSWA emulation */
282 struct timer hlt_timer;
283 struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
285 /* This vector hosts the protection keys for pkr emulation of PV domains.
286 * Currently only 15 registers are usable by domU's. pkr[15] is
287 * reserved for the hypervisor. */
288 unsigned long pkrs[XEN_IA64_NPKRS+1]; /* protection key registers */
289 #define XEN_IA64_PKR_IN_USE 0x1 /* If psr.pk = 1 was set. */
290 unsigned char pkr_flags;
292 unsigned char vhpt_pg_shift; /* PAGE_SHIFT or less */
293 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
/* Per-vcpu VHPT state: PTA register value and the backing page. */
294 PTA pta;
295 unsigned long vhpt_maddr;
296 struct page_info* vhpt_page;
297 unsigned long vhpt_entries;
298 #endif
299 #define INVALID_PROCESSOR INT_MAX
/* Physical CPU this vcpu last ran on, or INVALID_PROCESSOR. */
300 int last_processor;
301 cpumask_t cache_coherent_map;
302 };
304 #include <asm/uaccess.h> /* for KERNEL_DS */
305 #include <asm/pgtable.h>
307 int
308 do_perfmon_op(unsigned long cmd,
309 XEN_GUEST_HANDLE(void) arg1, unsigned long arg2);
311 void
312 ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
313 unsigned long iim, unsigned long itir, unsigned long arg5,
314 unsigned long arg6, unsigned long arg7, unsigned long stack);
316 #endif /* __ASM_DOMAIN_H__ */
318 /*
319 * Local variables:
320 * mode: C
321 * c-set-style: "BSD"
322 * c-basic-offset: 4
323 * tab-width: 4
324 * indent-tabs-mode: nil
325 * End:
326 */