ia64/xen-unstable

view xen/include/asm-ia64/domain.h @ 16682:7515dc56c124

[IA64] Sort out the XEN_IA64_OPTF_IDENT_MAP_REG[457] constants confusion

Currently the constants are used for two different purposes:
one is the OPTF hypercall sub-command,
the other is a bit flag for struct opt_feature::mask.
They are different spaces, so split them out.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Sun Dec 30 13:02:16 2007 -0700 (2007-12-30)
parents f9ca1d8c9e65
children 09cd682ac68e
line source
1 #ifndef __ASM_DOMAIN_H__
2 #define __ASM_DOMAIN_H__
4 #include <linux/thread_info.h>
5 #include <asm/tlb.h>
6 #include <asm/vmx_vpd.h>
7 #include <asm/vmmu.h>
8 #include <asm/regionreg.h>
9 #include <public/xen.h>
10 #include <asm/vmx_platform.h>
11 #include <xen/list.h>
12 #include <xen/cpumask.h>
13 #include <asm/fpswa.h>
14 #include <xen/rangeset.h>
/* Forward declarations -- full definitions live in other headers/sources. */
16 struct p2m_entry;
17 #ifdef CONFIG_XEN_IA64_TLB_TRACK
18 struct tlb_track;
19 #endif
21 struct vcpu;
/* Release per-vcpu resources held by v (called on domain teardown). */
22 extern void relinquish_vcpu_resources(struct vcpu *v);
/* Second-stage vcpu initialisation.
   NOTE(review): presumably returns 0 on success / negative errno on failure,
   per the usual Xen convention -- confirm at the definition site. */
23 extern int vcpu_late_initialise(struct vcpu *v);
25 /* Given a current-domain metaphysical address, return the physical address.
   On lookup the matching p2m entry may be recorded via 'entry'. */
26 extern unsigned long translate_domain_mpaddr(unsigned long mpaddr,
27 struct p2m_entry* entry);
29 /* Set shared_info virtual address; returns the previous/resulting value.
   NOTE(review): exact return semantics not visible here -- see definition. */
30 extern unsigned long domain_set_shared_info_va (unsigned long va);
32 /* Flush cache of domain d.
33 If sync_only is true, only synchronize I&D caches,
34 if false, flush and invalidate caches. */
35 extern void domain_cache_flush (struct domain *d, int sync_only);
37 /* Control the shadow (dirty-logging) mode; sc carries the sub-operation. */
38 extern int shadow_mode_control(struct domain *d, xen_domctl_shadow_op_t *sc);
40 /* Cleanly crash the current domain with a message.
   printf-style format checking applies to the 2nd/3rd arguments. */
41 extern void panic_domain(struct pt_regs *, const char *, ...)
42 __attribute__ ((noreturn, format (printf, 2, 3)));
/* Minimal per-domain address-space descriptor: only the page global
 * directory (root of the p2m page table) is tracked. */
44 struct mm_struct {
45 volatile pgd_t * pgd;
46 // atomic_t mm_users; /* How many users with user space? */
47 };
/* Bookkeeping for mappings of another (foreign) domain's p2m table. */
49 struct foreign_p2m {
/* Protects 'head'. */
50 spinlock_t lock;
51 /*
52 * sorted list with entry->gpfn.
53 * It is expected that only small number of foreign domain p2m
54 * mapping happens at the same time.
55 */
56 struct list_head head;
57 };
/* Per-physical-CPU record of the last vcpu that ran there (used to decide
 * whether TLB state must be flushed on context switch). Cacheline-aligned
 * to avoid false sharing between CPUs. */
59 struct last_vcpu {
60 #define INVALID_VCPU_ID INT_MAX
61 int vcpu_id;
62 #ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
63 u32 tlbflush_timestamp;
64 #endif
65 } ____cacheline_aligned_in_smp;
67 /* These are data in domain memory for SAL emulator. */
68 struct xen_sal_data {
69 /* OS boot rendez vous. */
70 unsigned long boot_rdv_ip;
71 unsigned long boot_rdv_r1;
73 /* These are for EFI_SET_VIRTUAL_ADDRESS_MAP emulation. */
74 int efi_virt_mode; /* phys : 0 , virt : 1 */
75 };
77 /*
78 * Optimization features are used by the hypervisor to do some optimizations
79 * for guests. By default the optimizations are switched off and the guest
80 * may activate the feature. The guest may do this via the hypercall
81 * __HYPERVISOR_opt_feature. Domain builder code can also enable these
82 * via XEN_DOMCTL_set_opt_feature.
83 */
85 /*
86 * Helper struct for the different identity mapping optimizations.
87 * The hypervisor does the insertion of address translations in the tlb
88 * for identity mapped areas without reflecting the page fault
89 * to the guest.
90 */
91 struct identity_mapping {
92 unsigned long pgprot; /* The page protection bit mask of the pte.*/
93 unsigned long key; /* A protection key. */
94 };
96 /* opt_feature mask */
/* The *_FLG_BIT constants are bit positions and the *_FLG constants are the
 * corresponding masks for struct opt_feature::mask. They are deliberately a
 * separate namespace from the XEN_IA64_OPTF_* hypercall sub-command values. */
97 /*
98 * If this feature is switched on, the hypervisor inserts the
99 * tlb entries without calling the guest's trap handler.
100 * This is useful in guests using region 7 for identity mapping
101 * like the linux kernel does.
102 */
103 #define XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT 0
104 #define XEN_IA64_OPTF_IDENT_MAP_REG7_FLG \
105 (1UL << XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT)
107 /* Identity mapping of region 4 addresses in HVM. */
108 #define XEN_IA64_OPTF_IDENT_MAP_REG4_FLG_BIT \
109 (XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT + 1)
110 #define XEN_IA64_OPTF_IDENT_MAP_REG4_FLG \
111 (1UL << XEN_IA64_OPTF_IDENT_MAP_REG4_FLG_BIT)
113 /* Identity mapping of region 5 addresses in HVM. */
114 #define XEN_IA64_OPTF_IDENT_MAP_REG5_FLG_BIT \
115 (XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT + 2)
116 #define XEN_IA64_OPTF_IDENT_MAP_REG5_FLG \
117 (1UL << XEN_IA64_OPTF_IDENT_MAP_REG5_FLG_BIT)
119 /* Central structure for optimization features used by the hypervisor. */
120 struct opt_feature {
121 unsigned long mask; /* For every feature one bit (XEN_IA64_OPTF_*_FLG). */
122 struct identity_mapping im_reg4; /* Region 4 identity mapping */
123 struct identity_mapping im_reg5; /* Region 5 identity mapping */
124 struct identity_mapping im_reg7; /* Region 7 identity mapping */
125 };
127 /* Set an optimization feature in the struct arch_domain.
   Invoked from the __HYPERVISOR_opt_feature hypercall and from
   XEN_DOMCTL_set_opt_feature. */
128 extern int domain_opt_feature(struct domain *, struct xen_ia64_opt_feature*);
/* IA64-specific per-domain state, embedded in struct domain.
 * NOTE(review): some fields are mirrored into arch_vcpu for fast assembly
 * access -- keep the two in sync when changing layout. */
130 struct arch_domain {
131 struct mm_struct mm;
133 /* Flags. */
134 union {
135 unsigned long flags;
136 struct {
137 unsigned int is_vti : 1; /* VT-i (fully virtualized) domain? */
138 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
139 unsigned int has_pervcpu_vhpt : 1;
140 unsigned int vhpt_size_log2 : 6;
141 #endif
142 };
143 };
145 /* maximum metaphysical address of conventional memory */
146 u64 convmem_end;
148 /* Allowed accesses to io ports. */
149 struct rangeset *ioport_caps;
151 /* There are two ranges of RID for a domain:
152 one big range, used to virtualize domain RID,
153 one small range for internal Xen use (metaphysical). */
154 /* Big range. */
155 unsigned int starting_rid; /* first RID assigned to domain */
156 unsigned int ending_rid; /* one beyond highest RID assigned to domain */
157 /* Metaphysical range. */
158 unsigned int starting_mp_rid;
159 unsigned int ending_mp_rid;
160 /* RID for metaphysical mode. */
161 unsigned int metaphysical_rid_dt; /* dt=it=0 */
162 unsigned int metaphysical_rid_d; /* dt=0, it=1 */
164 unsigned char rid_bits; /* number of virtual rid bits (default: 18) */
165 int breakimm; /* The imm value for hypercalls. */
167 struct virtual_platform_def vmx_platform;
168 #define hvm_domain vmx_platform /* platform defs are not vmx specific */
/* Guest virtual address of the shared_info page (see
   domain_set_shared_info_va above). */
170 u64 shared_info_va;
172 /* Address of SAL emulator data */
173 struct xen_sal_data *sal_data;
175 /* Address of efi_runtime_services_t (placed in domain memory) */
176 void *efi_runtime;
177 /* Address of fpswa_interface_t (placed in domain memory) */
178 void *fpswa_inf;
180 /* Bitmap of shadow dirty bits.
181 Set iff shadow mode is enabled. */
182 u64 *shadow_bitmap;
183 /* Length (in bits!) of shadow bitmap. */
184 unsigned long shadow_bitmap_size;
185 /* Number of bits set in bitmap. */
186 atomic64_t shadow_dirty_count;
187 /* Number of faults. */
188 atomic64_t shadow_fault_count;
190 /* for foreign domain p2m table mapping */
191 struct foreign_p2m foreign_p2m;
/* Per-physical-CPU record of the last vcpu of this domain run there. */
193 struct last_vcpu last_vcpu[NR_CPUS];
195 struct opt_feature opt_feature;
197 /* Debugging flags. See arch-ia64.h for bits definition. */
198 unsigned int debug_flags;
200 /* Reason of debugging break. */
201 unsigned int debug_event;
203 #ifdef CONFIG_XEN_IA64_TLB_TRACK
204 struct tlb_track* tlb_track;
205 #endif
207 /* for domctl_destroy_domain continuation */
208 enum {
209 RELRES_not_started,
210 RELRES_mm_teardown,
211 RELRES_xen,
212 RELRES_dom,
213 RELRES_done,
214 } relres;
215 /* Continuable mm_teardown() */
216 unsigned long mm_teardown_offset;
217 /* Continuable domain_relinquish_resources() */
218 struct list_head relmem_list;
219 };
/* Byte offset, within the shared_info vcpu_info array, of vcpu v's
 * event-channel upcall mask. */
220 #define INT_ENABLE_OFFSET(v) \
221 (sizeof(vcpu_info_t) * (v)->vcpu_id + \
222 offsetof(vcpu_info_t, evtchn_upcall_mask))
/* Whether domain d uses per-vcpu VHPTs; constant 0 when the feature is
 * compiled out, so callers need no #ifdef. */
224 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
225 #define HAS_PERVCPU_VHPT(d) ((d)->arch.has_pervcpu_vhpt)
226 #else
227 #define HAS_PERVCPU_VHPT(d) (0)
228 #endif
/* IA64-specific per-vcpu state, embedded in struct vcpu.
 * NOTE(review): several fields are accessed from assembly by fixed offset;
 * do not reorder without checking the asm users. */
231 struct arch_vcpu {
232 /* Save the state of vcpu.
233 This is the first entry to speed up accesses. */
234 mapped_regs_t *privregs;
236 /* TR and TC. */
237 TR_ENTRY itrs[NITRS];
238 TR_ENTRY dtrs[NDTRS];
239 TR_ENTRY itlb;
240 TR_ENTRY dtlb;
242 /* Bit is set if there is a tr/tc for the region. */
243 unsigned char itr_regions;
244 unsigned char dtr_regions;
245 unsigned char tc_regions;
247 unsigned long irr[4]; /* Interrupt request register. */
248 unsigned long insvc[4]; /* Interrupt in service. */
249 unsigned long iva; /* Guest interruption vector address. */
250 unsigned long domain_itm; /* Guest interval timer match value. */
251 unsigned long domain_itm_last;
253 unsigned long event_callback_ip; // event callback handler
254 unsigned long failsafe_callback_ip; // Do we need it?
256 /* These fields are copied from arch_domain to make access easier/faster
257 in assembly code. */
258 unsigned long metaphysical_rid_dt; // from arch_domain (so is pinned)
259 unsigned long metaphysical_rid_d; // from arch_domain (so is pinned)
260 unsigned long metaphysical_saved_rr0; // from arch_domain (so is pinned)
261 unsigned long metaphysical_saved_rr4; // from arch_domain (so is pinned)
262 unsigned long fp_psr; // used for lazy float register
263 int breakimm; // from arch_domain (so is pinned)
264 int starting_rid; /* first RID assigned to domain */
265 int ending_rid; /* one beyond highest RID assigned to domain */
267 /* Bitset for debug register use. */
268 unsigned int dbg_used;
269 u64 dbr[IA64_NUM_DBG_REGS];
270 u64 ibr[IA64_NUM_DBG_REGS];
/* NOTE(review): more members follow despite "this must be last" -- the
   comment looks stale (or refers only to a subset of the state); confirm. */
272 struct thread_struct _thread; // this must be last
274 thash_cb_t vtlb;
275 thash_cb_t vhpt;
276 char irq_new_pending;
277 char irq_new_condition; // vpsr.i/vtpr change, check for pending VHPI
278 char hypercall_continuation;
280 fpswa_ret_t fpswa_ret; /* save return values of FPSWA emulation */
281 struct timer hlt_timer;
282 struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
284 /* This vector hosts the protection keys for pkr emulation of PV domains.
285 * Currently only 15 registers are usable by domU's. pkr[15] is
286 * reserved for the hypervisor. */
287 unsigned long pkrs[XEN_IA64_NPKRS+1]; /* protection key registers */
288 #define XEN_IA64_PKR_IN_USE 0x1 /* If psr.pk = 1 was set. */
289 unsigned char pkr_flags;
291 unsigned char vhpt_pg_shift; /* PAGE_SHIFT or less */
292 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
293 PTA pta;
294 unsigned long vhpt_maddr;
295 struct page_info* vhpt_page;
296 unsigned long vhpt_entries;
297 #endif
298 #define INVALID_PROCESSOR INT_MAX
/* Physical CPU this vcpu last ran on (INVALID_PROCESSOR if none). */
299 int last_processor;
300 cpumask_t cache_coherent_map;
301 };
303 #include <asm/uaccess.h> /* for KERNEL_DS */
304 #include <asm/pgtable.h>
/* Perfmon hypercall dispatcher; cmd selects the sub-operation.
   NOTE(review): presumably returns 0 / negative errno -- see definition. */
306 int
307 do_perfmon_op(unsigned long cmd,
308 XEN_GUEST_HANDLE(void) arg1, unsigned long arg2);
310 #endif /* __ASM_DOMAIN_H__ */
312 /*
313 * Local variables:
314 * mode: C
315 * c-set-style: "BSD"
316 * c-basic-offset: 4
317 * tab-width: 4
318 * indent-tabs-mode: nil
319 * End:
320 */