ia64/xen-unstable

xen/include/asm-ia64/domain.h @ 15826:7e79e7f01f3d

Implement ia64 continuable domain destroy.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author kfraser@localhost.localdomain
date Fri Aug 31 15:46:37 2007 +0100 (2007-08-31)
#ifndef __ASM_DOMAIN_H__
#define __ASM_DOMAIN_H__

#include <linux/thread_info.h>
#include <asm/tlb.h>
#include <asm/vmx_vpd.h>
#include <asm/vmmu.h>
#include <asm/regionreg.h>
#include <public/xen.h>
#include <asm/vmx_platform.h>
#include <xen/list.h>
#include <xen/cpumask.h>
#include <asm/fpswa.h>
#include <xen/rangeset.h>

struct p2m_entry;
#ifdef CONFIG_XEN_IA64_TLB_TRACK
struct tlb_track;
#endif

struct vcpu;
extern void relinquish_vcpu_resources(struct vcpu *v);
extern void vcpu_share_privregs_with_guest(struct vcpu *v);
extern int vcpu_late_initialise(struct vcpu *v);

/* Given a current domain metaphysical address, return the physical address. */
extern unsigned long translate_domain_mpaddr(unsigned long mpaddr,
                                             struct p2m_entry* entry);

/* Set the shared_info virtual address. */
extern unsigned long domain_set_shared_info_va (unsigned long va);

/* Flush the caches of domain d.
   If sync_only is true, only synchronize the I and D caches;
   if it is false, flush and invalidate the caches. */
extern void domain_cache_flush (struct domain *d, int sync_only);

/* Control the shadow mode. */
extern int shadow_mode_control(struct domain *d, xen_domctl_shadow_op_t *sc);

/* Cleanly crash the current domain with a message. */
extern void panic_domain(struct pt_regs *, const char *, ...)
    __attribute__ ((noreturn, format (printf, 2, 3)));
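
/*
 * Illustrative only, not part of the original header: because of the
 * format(printf, 2, 3) attribute above, the format string is the second
 * argument and is checked like printf.  A hypothetical caller (the variable
 * names here are made up) would look like:
 *
 *     panic_domain(regs, "unhandled metaphysical address 0x%lx\n", mpaddr);
 */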

struct mm_struct {
    volatile pgd_t * pgd;
    // atomic_t mm_users;    /* How many users with user space? */
};

struct last_vcpu {
#define INVALID_VCPU_ID INT_MAX
    int vcpu_id;
#ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
    u32 tlbflush_timestamp;
#endif
} ____cacheline_aligned_in_smp;

/* Data kept in domain memory for the SAL emulator. */
struct xen_sal_data {
    /* OS boot rendezvous. */
    unsigned long boot_rdv_ip;
    unsigned long boot_rdv_r1;

    /* State for EFI_SET_VIRTUAL_ADDRESS_MAP emulation. */
    int efi_virt_mode;    /* phys : 0 , virt : 1 */
};

/*
 * Optimization features are used by the hypervisor to do some optimizations
 * for guests.  By default the optimizations are switched off and the guest
 * has to activate the feature.  On PV the guest must do this via the
 * __HYPERVISOR_opt_feature hypercall; on HVM it is done within Xen in
 * set_os_type().  (See the illustrative sketch after domain_opt_feature()
 * below.)
 */

/*
 * Helper struct for the different identity mapping optimizations.
 * For identity-mapped areas the hypervisor inserts the address translation
 * into the TLB itself instead of reflecting the page fault to the guest.
 */
struct identity_mapping {
    unsigned long pgprot;    /* The page protection bit mask of the pte. */
    unsigned long key;       /* A protection key. */
};

/* Central structure for optimization features used by the hypervisor. */
struct opt_feature {
    unsigned long mask;                 /* One bit per feature. */
    struct identity_mapping im_reg4;    /* Region 4 identity mapping */
    struct identity_mapping im_reg5;    /* Region 5 identity mapping */
    struct identity_mapping im_reg7;    /* Region 7 identity mapping */
};

/*
 * The base XEN_IA64_OPTF_IDENT_MAP_REG7 is defined in public/arch-ia64.h.
 * Identity mapping of region 4 addresses in HVM.
 */
#define XEN_IA64_OPTF_IDENT_MAP_REG4    (XEN_IA64_OPTF_IDENT_MAP_REG7 + 1)
/* Identity mapping of region 5 addresses in HVM. */
#define XEN_IA64_OPTF_IDENT_MAP_REG5    (XEN_IA64_OPTF_IDENT_MAP_REG4 + 1)

/* Set an optimization feature in the struct arch_domain. */
extern int domain_opt_feature(struct xen_ia64_opt_feature*);
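
/*
 * Illustrative sketch, not part of the original header: how a PV guest might
 * switch on the region-7 identity-mapping optimization.  The field names
 * (cmd, on, pgprot, key) and the XEN_IA64_OPTF_ON constant are assumed to
 * match struct xen_ia64_opt_feature in public/arch-ia64.h;
 * HYPERVISOR_opt_feature() stands for whatever guest-side wrapper issues the
 * __HYPERVISOR_opt_feature hypercall, and pgprot_val(PAGE_KERNEL) is only an
 * example guest-side protection value.  Treat all of these as assumptions.
 */
#if 0
    struct xen_ia64_opt_feature optf = {
        .cmd    = XEN_IA64_OPTF_IDENT_MAP_REG7,
        .on     = XEN_IA64_OPTF_ON,
        .pgprot = pgprot_val(PAGE_KERNEL),  /* pte protection bits to insert */
        .key    = 0,                        /* protection key */
    };

    /* On the Xen side this request ends up in domain_opt_feature() above. */
    HYPERVISOR_opt_feature(&optf);
#endif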

struct arch_domain {
    struct mm_struct mm;

    /* Flags. */
    union {
        unsigned long flags;
        struct {
            unsigned int is_vti : 1;
#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
            unsigned int has_pervcpu_vhpt : 1;
#endif
        };
    };

    /* maximum metaphysical address of conventional memory */
    u64 convmem_end;

    /* Allowed accesses to io ports. */
    struct rangeset *ioport_caps;

    /* There are two ranges of RID for a domain:
       one big range, used to virtualize domain RID,
       one small range for internal Xen use (metaphysical). */
    /* Big range. */
    int starting_rid;       /* first RID assigned to domain */
    int ending_rid;         /* one beyond highest RID assigned to domain */
    /* Metaphysical range. */
    int starting_mp_rid;
    int ending_mp_rid;
    /* RID for metaphysical mode. */
    unsigned long metaphysical_rr0;
    unsigned long metaphysical_rr4;

    int rid_bits;           /* number of virtual rid bits (default: 18) */
    int breakimm;           /* The imm value for hypercalls. */

    struct virtual_platform_def vmx_platform;
#define hvm_domain vmx_platform    /* platform defs are not vmx specific */

    u64 xen_vastart;
    u64 xen_vaend;
    u64 shared_info_va;

    /* Address of SAL emulator data. */
    struct xen_sal_data *sal_data;

    /* Address of efi_runtime_services_t (placed in domain memory). */
    void *efi_runtime;
    /* Address of fpswa_interface_t (placed in domain memory). */
    void *fpswa_inf;

    /* Bitmap of shadow dirty bits.
       Set iff shadow mode is enabled.
       (See the illustrative sketch after this struct for how it is indexed.) */
    u64 *shadow_bitmap;
    /* Length (in bits!) of shadow bitmap. */
    unsigned long shadow_bitmap_size;
    /* Number of bits set in bitmap. */
    atomic64_t shadow_dirty_count;
    /* Number of faults. */
    atomic64_t shadow_fault_count;

    struct last_vcpu last_vcpu[NR_CPUS];

    struct opt_feature opt_feature;

#ifdef CONFIG_XEN_IA64_TLB_TRACK
    struct tlb_track* tlb_track;
#endif

    /* For domctl_destroy_domain continuation. */
    unsigned long mm_teardown_offset;
};
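
/*
 * Illustrative sketch, not part of the original header: the shadow_bitmap
 * fields above implement dirty-page tracking with one bit per guest page
 * frame and shadow_bitmap_size counting bits.  Assuming gpfn-based indexing
 * (an assumption; the real accessors live in the shadow code, not here), a
 * hypothetical test for a dirty frame could look like this:
 */
#if 0
static inline int shadow_test_dirty(const struct domain *d, unsigned long gpfn)
{
    if (d->arch.shadow_bitmap == NULL ||
        gpfn >= d->arch.shadow_bitmap_size)
        return 0;
    /* The bitmap is an array of u64 words, 64 bits per word. */
    return (d->arch.shadow_bitmap[gpfn / 64] >> (gpfn % 64)) & 1;
}
#endif
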
#define INT_ENABLE_OFFSET(v)                   \
    (sizeof(vcpu_info_t) * (v)->vcpu_id +      \
     offsetof(vcpu_info_t, evtchn_upcall_mask))
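
/*
 * Illustrative only, not part of the original header: vcpu_info_t entries sit
 * in an array at the start of shared_info_t (public/xen.h), so the macro above
 * yields the byte offset of a vcpu's evtchn_upcall_mask from the start of the
 * shared info area.  For example, code that wants the mask's virtual address
 * in the guest mapping could (hypothetically) compute
 *
 *     d->arch.shared_info_va + INT_ENABLE_OFFSET(v)
 *
 * using the shared_info_va field declared in struct arch_domain above.
 */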

#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
#define HAS_PERVCPU_VHPT(d)    ((d)->arch.has_pervcpu_vhpt)
#else
#define HAS_PERVCPU_VHPT(d)    (0)
#endif

struct arch_vcpu {
    /* Saved state of the vcpu.
       This is the first entry to speed up accesses. */
    mapped_regs_t *privregs;

    /* TR and TC. */
    TR_ENTRY itrs[NITRS];
    TR_ENTRY dtrs[NDTRS];
    TR_ENTRY itlb;
    TR_ENTRY dtlb;

    /* Bit is set if there is a tr/tc for the region. */
    unsigned char itr_regions;
    unsigned char dtr_regions;
    unsigned char tc_regions;

    unsigned long irr[4];      /* Interrupt request register. */
    unsigned long insvc[4];    /* Interrupt in service. */
    unsigned long iva;
    unsigned long domain_itm;
    unsigned long domain_itm_last;

    unsigned long event_callback_ip;       // event callback handler
    unsigned long failsafe_callback_ip;    // Do we need it?

    /* These fields are copied from arch_domain to make access easier/faster
       in assembly code. */
    unsigned long metaphysical_rr0;          // from arch_domain (so is pinned)
    unsigned long metaphysical_rr4;          // from arch_domain (so is pinned)
    unsigned long metaphysical_saved_rr0;    // from arch_domain (so is pinned)
    unsigned long metaphysical_saved_rr4;    // from arch_domain (so is pinned)
    unsigned long fp_psr;      // used for lazy float register handling
    int breakimm;              // from arch_domain (so is pinned)
    int starting_rid;          /* first RID assigned to domain */
    int ending_rid;            /* one beyond highest RID assigned to domain */

    /* Bitset for debug register use. */
    unsigned int dbg_used;
    u64 dbr[IA64_NUM_DBG_REGS];
    u64 ibr[IA64_NUM_DBG_REGS];

    struct thread_struct _thread;    // this must be last

    thash_cb_t vtlb;
    thash_cb_t vhpt;
    char irq_new_pending;
    char irq_new_condition;    // vpsr.i/vtpr change, check for pending VHPI
    char hypercall_continuation;

    // for physical mode emulation
    int mode_flags;
    fpswa_ret_t fpswa_ret;     /* save return values of FPSWA emulation */
    struct timer hlt_timer;
    struct arch_vmx_struct arch_vmx;    /* Virtual Machine Extensions */

    /* This vector hosts the protection keys for pkr emulation of PV domains.
     * Currently only 15 registers are usable by domUs; pkr[15] is
     * reserved for the hypervisor. */
    unsigned long pkrs[XEN_IA64_NPKRS+1];    /* protection key registers */
#define XEN_IA64_PKR_IN_USE    0x1           /* If psr.pk = 1 was set. */
    unsigned char pkr_flags;

    unsigned char vhpt_pg_shift;    /* PAGE_SHIFT or less */
#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
    PTA pta;
    unsigned long vhpt_maddr;
    struct page_info* vhpt_page;
    unsigned long vhpt_entries;
#endif
#define INVALID_PROCESSOR    INT_MAX
    int last_processor;
    cpumask_t cache_coherent_map;
};

#include <asm/uaccess.h>    /* for KERNEL_DS */
#include <asm/pgtable.h>

int
do_perfmon_op(unsigned long cmd,
              XEN_GUEST_HANDLE(void) arg1, unsigned long arg2);

#endif /* __ASM_DOMAIN_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */