ia64/xen-unstable

xen/include/asm-ia64/domain.h @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use domain::max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents af0da711bbdb
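
For context, the conversion this changeset applies across the ia64 tree follows one pattern; the sketch below shows it (i is an int index, d is a struct domain pointer, and do_something() is a hypothetical placeholder for the per-vcpu work done at the real call sites):

    /* Before: loops were bounded by the compile-time constant. */
    for (i = 0; i < MAX_VCPUS; i++)
        if (d->vcpu[i] != NULL)
            do_something(d->vcpu[i]);

    /* After: bounded by the per-domain limit fixed at domain creation,
       so only allocated vcpu slots are scanned. */
    for (i = 0; i < d->max_vcpus; i++)
        if (d->vcpu[i] != NULL)
            do_something(d->vcpu[i]);
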
#ifndef __ASM_DOMAIN_H__
#define __ASM_DOMAIN_H__

#include <linux/thread_info.h>
#include <asm/tlb.h>
#include <asm/vmx_vpd.h>
#include <asm/vmmu.h>
#include <asm/regionreg.h>
#include <public/xen.h>
#include <asm/vmx_platform.h>
#include <xen/list.h>
#include <xen/cpumask.h>
#include <xen/mm.h>
#include <asm/fpswa.h>
#include <xen/rangeset.h>

struct p2m_entry;
#ifdef CONFIG_XEN_IA64_TLB_TRACK
struct tlb_track;
#endif

extern unsigned long volatile jiffies;

struct vcpu;
extern void relinquish_vcpu_resources(struct vcpu *v);
extern int vcpu_late_initialise(struct vcpu *v);

/* Given a current domain metaphysical address, return the physical address. */
extern unsigned long translate_domain_mpaddr(unsigned long mpaddr,
                                             struct p2m_entry *entry);

/* Set the shared_info virtual address. */
extern unsigned long domain_set_shared_info_va(unsigned long va);

/* Flush the cache of domain d.
   If sync_only is true, only synchronize the I&D caches;
   if false, flush and invalidate the caches. */
extern void domain_cache_flush(struct domain *d, int sync_only);

/* Control the shadow mode. */
extern int shadow_mode_control(struct domain *d, xen_domctl_shadow_op_t *sc);

/* Cleanly crash the current domain with a message. */
extern void panic_domain(struct pt_regs *, const char *, ...)
    __attribute__ ((noreturn, format (printf, 2, 3)));

#define has_arch_pdevs(d)       (!list_empty(&(d)->arch.pdev_list))
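
/* Per-domain mm context: Xen/ia64 tracks only the page-table root here. */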
struct mm_struct {
    volatile pgd_t *pgd;
    // atomic_t mm_users;          /* How many users with user space? */
};

struct foreign_p2m {
    spinlock_t lock;
    /*
     * List sorted by entry->gpfn.  Only a small number of foreign
     * domain p2m mappings are expected to exist at any one time.
     */
    struct list_head head;
};
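
/* Per physical cpu: the id of the vcpu of this domain that ran on it most
   recently (plus a tlbflush clock timestamp when that option is enabled). */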
struct last_vcpu {
#define INVALID_VCPU_ID INT_MAX
    int vcpu_id;
#ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
    u32 tlbflush_timestamp;
#endif
} ____cacheline_aligned_in_smp;

/* These are data kept in domain memory for the SAL emulator. */
struct xen_sal_data {
    /* OS boot rendezvous. */
    unsigned long boot_rdv_ip;
    unsigned long boot_rdv_r1;

    /* These are for EFI_SET_VIRTUAL_ADDRESS_MAP emulation. */
    int efi_virt_mode;          /* phys : 0 , virt : 1 */
};

/*
 * Optimization features are used by the hypervisor to do some optimizations
 * for guests.  By default the optimizations are switched off and the guest
 * may activate a feature.  The guest may do this via the hypercall
 * __HYPERVISOR_opt_feature.  Domain builder code can also enable these
 * via XEN_DOMCTL_set_opt_feature.
 */

/*
 * Helper struct for the different identity mapping optimizations.
 * The hypervisor inserts the address translations into the tlb
 * for identity mapped areas without reflecting the page fault
 * to the guest.
 */
struct identity_mapping {
    unsigned long pgprot;       /* The page protection bit mask of the pte. */
    unsigned long key;          /* A protection key. */
};

/* opt_feature mask */
/*
 * If this feature is switched on, the hypervisor inserts the
 * tlb entries without calling the guest's trap handler.
 * This is useful for guests that use region 7 for identity mapping,
 * as the Linux kernel does.
 */
#define XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT    0
#define XEN_IA64_OPTF_IDENT_MAP_REG7_FLG        \
    (1UL << XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT)

/* Identity mapping of region 4 addresses in HVM. */
#define XEN_IA64_OPTF_IDENT_MAP_REG4_FLG_BIT    \
    (XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT + 1)
#define XEN_IA64_OPTF_IDENT_MAP_REG4_FLG        \
    (1UL << XEN_IA64_OPTF_IDENT_MAP_REG4_FLG_BIT)

/* Identity mapping of region 5 addresses in HVM. */
#define XEN_IA64_OPTF_IDENT_MAP_REG5_FLG_BIT    \
    (XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT + 2)
#define XEN_IA64_OPTF_IDENT_MAP_REG5_FLG        \
    (1UL << XEN_IA64_OPTF_IDENT_MAP_REG5_FLG_BIT)

/* Central structure for the optimization features used by the hypervisor. */
struct opt_feature {
    unsigned long mask;              /* One bit per feature. */
    struct identity_mapping im_reg4; /* Region 4 identity mapping */
    struct identity_mapping im_reg5; /* Region 5 identity mapping */
    struct identity_mapping im_reg7; /* Region 7 identity mapping */
};

/* Set an optimization feature in the struct arch_domain. */
extern int domain_opt_feature(struct domain *, struct xen_ia64_opt_feature *);

struct arch_domain {
    struct mm_struct mm;

    /* Flags. */
    union {
        unsigned long flags;
        struct {
            unsigned int is_sioemu : 1;
#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
            unsigned int has_pervcpu_vhpt : 1;
            unsigned int vhpt_size_log2 : 6;
#endif
        };
    };

    /* Maximum metaphysical address of conventional memory. */
    u64 convmem_end;

    /* Allowed accesses to io ports. */
    struct rangeset *ioport_caps;

    /* There are two ranges of RIDs for a domain:
       one big range, used to virtualize domain RIDs,
       one small range for internal Xen use (metaphysical). */
    /* Big range. */
    unsigned int starting_rid;          /* first RID assigned to domain */
    unsigned int ending_rid;            /* one beyond highest RID assigned to domain */
    /* Metaphysical range. */
    unsigned int starting_mp_rid;
    unsigned int ending_mp_rid;
    /* RIDs for metaphysical mode. */
    unsigned int metaphysical_rid_dt;   /* dt=it=0 */
    unsigned int metaphysical_rid_d;    /* dt=0, it=1 */

    unsigned char rid_bits;             /* number of virtual rid bits (default: 18) */
    int breakimm;                       /* The imm value for hypercalls. */

    struct list_head pdev_list;
    struct virtual_platform_def vmx_platform;
#define hvm_domain vmx_platform         /* platform defs are not vmx specific */

    u64 shared_info_va;

    /* Address of the SAL emulator data. */
    struct xen_sal_data *sal_data;

    /* Shared page for notifying that explicit PIRQ EOI is required. */
    unsigned long *pirq_eoi_map;
    unsigned long pirq_eoi_map_mfn;

    /* Address of efi_runtime_services_t (placed in domain memory). */
    void *efi_runtime;
    /* Address of fpswa_interface_t (placed in domain memory). */
    void *fpswa_inf;

    /* Bitmap of shadow dirty bits.
       Set iff shadow mode is enabled. */
    u64 *shadow_bitmap;
    /* Length (in bits!) of the shadow bitmap. */
    unsigned long shadow_bitmap_size;
    /* Number of bits set in the bitmap. */
    atomic64_t shadow_dirty_count;
    /* Number of faults. */
    atomic64_t shadow_fault_count;

    /* For foreign domain p2m table mappings. */
    struct foreign_p2m foreign_p2m;

    struct last_vcpu last_vcpu[NR_CPUS];

    struct opt_feature opt_feature;

    /* Debugging flags.  See arch-ia64.h for the bit definitions. */
    unsigned int debug_flags;

    /* Reason for the debugging break. */
    unsigned int debug_event;

#ifdef CONFIG_XEN_IA64_TLB_TRACK
    struct tlb_track *tlb_track;
#endif

    /* For domctl_destroy_domain continuation. */
    enum {
        RELRES_not_started,
        RELRES_mm_teardown,
        RELRES_xen,
        RELRES_dom,
        RELRES_done,
    } relres;
    /* Continuable mm_teardown(). */
    unsigned long mm_teardown_offset;
    /* Continuable domain_relinquish_resources(). */
    struct page_list_head relmem_list;
};
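
/* Byte offset of vcpu v's evtchn_upcall_mask within the shared_info
   vcpu_info[] array (in the classic layout, vcpu_info[] is the first
   member of shared_info_t). */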
#define INT_ENABLE_OFFSET(v)                    \
    (sizeof(vcpu_info_t) * (v)->vcpu_id +       \
     offsetof(vcpu_info_t, evtchn_upcall_mask))

#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
#define HAS_PERVCPU_VHPT(d)     ((d)->arch.has_pervcpu_vhpt)
#else
#define HAS_PERVCPU_VHPT(d)     (0)
#endif

struct arch_vcpu {
    /* Saved state of the vcpu.
       This is the first entry to speed up accesses. */
    mapped_regs_t *privregs;

    /* TR and TC. */
    TR_ENTRY itrs[NITRS];
    TR_ENTRY dtrs[NDTRS];
    TR_ENTRY itlb;
    TR_ENTRY dtlb;

    /* Bit is set if there is a tr/tc for the region. */
    unsigned char itr_regions;
    unsigned char dtr_regions;
    unsigned char tc_regions;

    unsigned long irr[4];               /* Interrupt request register. */
    unsigned long insvc[4];             /* Interrupts in service. */
    unsigned long iva;
    unsigned long domain_itm;
    unsigned long domain_itm_last;

    unsigned long event_callback_ip;    // event callback handler
    unsigned long failsafe_callback_ip; // Do we need it?

    /* These fields are copied from arch_domain to make access
       easier/faster in assembly code. */
    unsigned long metaphysical_rid_dt;    // from arch_domain (so is pinned)
    unsigned long metaphysical_rid_d;     // from arch_domain (so is pinned)
    unsigned long metaphysical_saved_rr0; // from arch_domain (so is pinned)
    unsigned long metaphysical_saved_rr4; // from arch_domain (so is pinned)
    unsigned long fp_psr;                 // used for lazy float register handling
    u64 *shadow_bitmap;                   // from arch_domain (so is pinned)
    int breakimm;                         // from arch_domain (so is pinned)
    int starting_rid;                     /* first RID assigned to domain */
    int ending_rid;                       /* one beyond highest RID assigned to domain */
    unsigned char rid_bits;               // from arch_domain (so is pinned)

    /* Bitset for debug register use. */
    unsigned int dbg_used;
    u64 dbr[IA64_NUM_DBG_REGS];
    u64 ibr[IA64_NUM_DBG_REGS];

    struct thread_struct _thread;       // this must be last

    thash_cb_t vtlb;
    thash_cb_t vhpt;
    char irq_new_pending;
    char irq_new_condition;             // vpsr.i/vtpr change, check for pending VHPI
    char hypercall_continuation;

    fpswa_ret_t fpswa_ret;              /* saved return values of FPSWA emulation */
    struct timer hlt_timer;
    struct arch_vmx_struct arch_vmx;    /* Virtual Machine Extensions */

    /* This vector holds the protection keys for pkr emulation of PV
     * domains.  Currently only 15 registers are usable by domUs;
     * pkr[15] is reserved for the hypervisor. */
    unsigned long pkrs[XEN_IA64_NPKRS + 1]; /* protection key registers */
#define XEN_IA64_PKR_IN_USE 0x1         /* If psr.pk = 1 was set. */
    unsigned char pkr_flags;

    unsigned char vhpt_pg_shift;        /* PAGE_SHIFT or less */
#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
    PTA pta;
    unsigned long vhpt_maddr;
    struct page_info *vhpt_page;
    unsigned long vhpt_entries;
#endif
#define INVALID_PROCESSOR INT_MAX
    int last_processor;
    cpumask_t cache_coherent_map;
};

#include <asm/uaccess.h>    /* for KERNEL_DS */
#include <asm/pgtable.h>

int
do_perfmon_op(unsigned long cmd,
              XEN_GUEST_HANDLE(void) arg1, unsigned long arg2);

void
ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
           unsigned long iim, unsigned long itir, unsigned long arg5,
           unsigned long arg6, unsigned long arg7, unsigned long stack);

void
ia64_lazy_load_fpu(struct vcpu *vcpu);

int construct_dom0(
    struct domain *d,
    unsigned long image_start, unsigned long image_len,
    unsigned long initrd_start, unsigned long initrd_len,
    char *cmdline);

#endif /* __ASM_DOMAIN_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
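
Usage note: the opt_feature machinery above is driven from the guest side. Below is a minimal sketch, modeled on the Linux ia64 Xen support code, of how a PV guest might switch on the region 7 identity-map optimization. The HYPERVISOR_opt_feature wrapper, the xen_ia64_opt_feature layout (cmd/on/pgprot/key), and the XEN_IA64_OPTF_* command values are assumed to come from the public interface headers; pgprot_val(PAGE_KERNEL) is the Linux-side protection mask and is illustrative:

    void enable_reg7_ident_map(void)
    {
        struct xen_ia64_opt_feature optf;

        optf.cmd    = XEN_IA64_OPTF_IDENT_MAP_REG7; /* which feature */
        optf.on     = XEN_IA64_OPTF_ON;             /* switch it on */
        optf.pgprot = pgprot_val(PAGE_KERNEL);      /* pte protection bits */
        optf.key    = 0;                            /* no protection key */
        HYPERVISOR_opt_feature(&optf);
    }

On the hypervisor side the request lands in domain_opt_feature(), which matches the bookkeeping declared above: the pgprot/key pair is stored in the region's struct identity_mapping and the corresponding XEN_IA64_OPTF_IDENT_MAP_REG7_FLG bit is set in opt_feature.mask, so later region 7 faults can be resolved by inserting the translation directly instead of reflecting the fault to the guest.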