ia64/xen-unstable

annotate xen/include/asm-ia64/domain.h @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

don't use MAX_VCPUS, and use domain::max_vcpus (d->max_vcpus).
The changeset of 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents af0da711bbdb
children
rev   line source
iap10@3833 1 #ifndef __ASM_DOMAIN_H__
iap10@3833 2 #define __ASM_DOMAIN_H__
iap10@3833 3
iap10@3833 4 #include <linux/thread_info.h>
djm@5470 5 #include <asm/tlb.h>
adsharma@4993 6 #include <asm/vmx_vpd.h>
adsharma@4993 7 #include <asm/vmmu.h>
adsharma@4993 8 #include <asm/regionreg.h>
kfraser@11119 9 #include <public/xen.h>
fred@5950 10 #include <asm/vmx_platform.h>
cl349@5296 11 #include <xen/list.h>
awilliam@8914 12 #include <xen/cpumask.h>
yamahata@19202 13 #include <xen/mm.h>
awilliam@10158 14 #include <asm/fpswa.h>
awilliam@10816 15 #include <xen/rangeset.h>
iap10@3833 16
awilliam@11808 17 struct p2m_entry;
awilliam@11808 18 #ifdef CONFIG_XEN_IA64_TLB_TRACK
awilliam@11808 19 struct tlb_track;
awilliam@11808 20 #endif
awilliam@10423 21
keir@17434 22 extern unsigned long volatile jiffies;
keir@17434 23
awilliam@10925 24 struct vcpu;
awilliam@10925 25 extern void relinquish_vcpu_resources(struct vcpu *v);
awilliam@13434 26 extern int vcpu_late_initialise(struct vcpu *v);
iap10@3833 27
awilliam@10252 28 /* given a current domain metaphysical address, return the physical address */
awilliam@10423 29 extern unsigned long translate_domain_mpaddr(unsigned long mpaddr,
awilliam@10423 30 struct p2m_entry* entry);
awilliam@10252 31
awilliam@10444 32 /* Set shared_info virtual address. */
awilliam@10444 33 extern unsigned long domain_set_shared_info_va (unsigned long va);
awilliam@10444 34
awilliam@9485 35 /* Flush cache of domain d.
awilliam@9485 36 If sync_only is true, only synchronize I&D caches,
awilliam@9485 37 if false, flush and invalidate caches. */
awilliam@9485 38 extern void domain_cache_flush (struct domain *d, int sync_only);
awilliam@9485 39
awilliam@10786 40 /* Control the shadow mode. */
kfraser@11296 41 extern int shadow_mode_control(struct domain *d, xen_domctl_shadow_op_t *sc);
awilliam@10786 42
awilliam@9741 43 /* Cleanly crash the current domain with a message. */
awilliam@9741 44 extern void panic_domain(struct pt_regs *, const char *, ...)
awilliam@9741 45 __attribute__ ((noreturn, format (printf, 2, 3)));
awilliam@9741 46
yamahata@18688 47 #define has_arch_pdevs(d) (!list_empty(&(d)->arch.pdev_list))
yamahata@18688 48
awilliam@10019 49 struct mm_struct {
awilliam@12664 50 volatile pgd_t * pgd;
awilliam@10019 51 // atomic_t mm_users; /* How many users with user space? */
awilliam@10019 52 };
awilliam@10019 53
alex@15842 54 struct foreign_p2m {
alex@15842 55 spinlock_t lock;
alex@15842 56 /*
alex@15842 57 * sorted list with entry->gpfn.
alex@15842 58 * It is expected that only small number of foreign domain p2m
alex@15842 59 * mapping happens at the same time.
alex@15842 60 */
alex@15842 61 struct list_head head;
alex@15842 62 };
alex@15842 63
awilliam@10567 64 struct last_vcpu {
awilliam@10567 65 #define INVALID_VCPU_ID INT_MAX
awilliam@10567 66 int vcpu_id;
awilliam@11812 67 #ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
awilliam@11812 68 u32 tlbflush_timestamp;
awilliam@11812 69 #endif
awilliam@10567 70 } ____cacheline_aligned_in_smp;
awilliam@10567 71
awilliam@10570 72 /* These are data in domain memory for SAL emulator. */
awilliam@10570 73 struct xen_sal_data {
awilliam@10570 74 /* OS boot rendezvous. */
awilliam@10570 75 unsigned long boot_rdv_ip;
awilliam@10570 76 unsigned long boot_rdv_r1;
awilliam@10570 77
awilliam@10570 78 /* These are for EFI_SET_VIRTUAL_ADDRESS_MAP emulation. */
awilliam@10570 79 int efi_virt_mode; /* phys : 0 , virt : 1 */
awilliam@10570 80 };
awilliam@10570 81
alex@15481 82 /*
alex@16477 83 * Optimization features are used by the hypervisor to do some optimizations
alex@16477 84 * for guests. By default the optimizations are switched off and the guest
alex@16477 85 * may activate the feature. The guest may do this via the hypercall
alex@16477 86 * __HYPERVISOR_opt_feature. Domain builder code can also enable these
alex@16477 87 * via XEN_DOMCTL_set_opt_feature.
alex@15481 88 */
alex@15481 89
alex@15481 90 /*
alex@15481 91 * Helper struct for the different identity mapping optimizations.
alex@15481 92 * The hypervisor does the insertion of address translations in the tlb
alex@15481 93 * for identity mapped areas without reflecting the page fault
alex@15481 94 * to the guest.
alex@15481 95 */
alex@15481 96 struct identity_mapping {
alex@15481 97 unsigned long pgprot; /* The page protection bit mask of the pte.*/
alex@15481 98 unsigned long key; /* A protection key. */
alex@15481 99 };
alex@15481 100
alex@16682 101 /* opt_feature mask */
alex@16682 102 /*
alex@16682 103 * If this feature is switched on, the hypervisor inserts the
alex@16682 104 * tlb entries without calling the guests traphandler.
alex@16682 105 * This is useful in guests using region 7 for identity mapping
alex@16682 106 * like the linux kernel does.
alex@16682 107 */
alex@16682 108 #define XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT 0
alex@16682 109 #define XEN_IA64_OPTF_IDENT_MAP_REG7_FLG \
alex@16682 110 (1UL << XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT)
alex@16682 111
alex@16682 112 /* Identity mapping of region 4 addresses in HVM. */
alex@16682 113 #define XEN_IA64_OPTF_IDENT_MAP_REG4_FLG_BIT \
alex@16682 114 (XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT + 1)
alex@16682 115 #define XEN_IA64_OPTF_IDENT_MAP_REG4_FLG \
alex@16682 116 (1UL << XEN_IA64_OPTF_IDENT_MAP_REG4_FLG_BIT)
alex@16682 117
alex@16682 118 /* Identity mapping of region 5 addresses in HVM. */
alex@16682 119 #define XEN_IA64_OPTF_IDENT_MAP_REG5_FLG_BIT \
alex@16682 120 (XEN_IA64_OPTF_IDENT_MAP_REG7_FLG_BIT + 2)
alex@16682 121 #define XEN_IA64_OPTF_IDENT_MAP_REG5_FLG \
alex@16682 122 (1UL << XEN_IA64_OPTF_IDENT_MAP_REG5_FLG_BIT)
alex@16682 123
alex@15481 124 /* Central structure for optimization features used by the hypervisor. */
alex@15481 125 struct opt_feature {
alex@15481 126 unsigned long mask; /* For every feature one bit. */
alex@15481 127 struct identity_mapping im_reg4; /* Region 4 identity mapping */
alex@15481 128 struct identity_mapping im_reg5; /* Region 5 identity mapping */
alex@15481 129 struct identity_mapping im_reg7; /* Region 7 identity mapping */
alex@15481 130 };
alex@15481 131
alex@15481 132 /* Set an optimization feature in the struct arch_domain. */
alex@16399 133 extern int domain_opt_feature(struct domain *, struct xen_ia64_opt_feature*);
alex@15481 134
iap10@3833 135 struct arch_domain {
awilliam@10019 136 struct mm_struct mm;
awilliam@10570 137
awilliam@10570 138 /* Flags. */
awilliam@10570 139 union {
awilliam@10570 140 unsigned long flags;
alex@17072 141 struct {
alex@17072 142 unsigned int is_sioemu : 1;
alex@16748 143 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
awilliam@11745 144 unsigned int has_pervcpu_vhpt : 1;
keir@16158 145 unsigned int vhpt_size_log2 : 6;
alex@17072 146 #endif
alex@16748 147 };
awilliam@10570 148 };
awilliam@9685 149
awilliam@13114 150 /* maximum metaphysical address of conventional memory */
awilliam@13114 151 u64 convmem_end;
awilliam@13114 152
awilliam@10816 153 /* Allowed accesses to io ports. */
awilliam@10816 154 struct rangeset *ioport_caps;
awilliam@10816 155
awilliam@9685 156 /* There are two ranges of RID for a domain:
awilliam@9685 157 one big range, used to virtualize domain RID,
awilliam@9685 158 one small range for internal Xen use (metaphysical). */
awilliam@9685 159 /* Big range. */
alex@16211 160 unsigned int starting_rid; /* first RID assigned to domain */
alex@16211 161 unsigned int ending_rid; /* one beyond highest RID assigned to domain */
awilliam@9685 162 /* Metaphysical range. */
alex@16211 163 unsigned int starting_mp_rid;
alex@16211 164 unsigned int ending_mp_rid;
awilliam@10570 165 /* RID for metaphysical mode. */
alex@16211 166 unsigned int metaphysical_rid_dt; /* dt=it=0 */
alex@16211 167 unsigned int metaphysical_rid_d; /* dt=0, it=1 */
awilliam@10570 168
alex@16211 169 unsigned char rid_bits; /* number of virtual rid bits (default: 18) */
alex@16211 170 int breakimm; /* The imm value for hypercalls. */
fred@5986 171
yamahata@18688 172 struct list_head pdev_list;
kaf24@8708 173 struct virtual_platform_def vmx_platform;
kaf24@8708 174 #define hvm_domain vmx_platform /* platform defs are not vmx specific */
fred@5986 175
awilliam@10570 176 u64 shared_info_va;
awilliam@10570 177
awilliam@10570 178 /* Address of SAL emulator data */
awilliam@10570 179 struct xen_sal_data *sal_data;
awilliam@10246 180
yamahata@18862 181 /* Shared page for notifying that explicit PIRQ EOI is required. */
yamahata@18862 182 unsigned long *pirq_eoi_map;
yamahata@18862 183 unsigned long pirq_eoi_map_mfn;
yamahata@18862 184
awilliam@10570 185 /* Address of efi_runtime_services_t (placed in domain memory) */
awilliam@9985 186 void *efi_runtime;
awilliam@10570 187 /* Address of fpswa_interface_t (placed in domain memory) */
awilliam@10158 188 void *fpswa_inf;
awilliam@10567 189
awilliam@10786 190 /* Bitmap of shadow dirty bits.
awilliam@10786 191 Set iff shadow mode is enabled. */
awilliam@10786 192 u64 *shadow_bitmap;
awilliam@10786 193 /* Length (in bits!) of shadow bitmap. */
awilliam@10786 194 unsigned long shadow_bitmap_size;
awilliam@10786 195 /* Number of bits set in bitmap. */
awilliam@10786 196 atomic64_t shadow_dirty_count;
awilliam@10786 197 /* Number of faults. */
awilliam@10786 198 atomic64_t shadow_fault_count;
awilliam@10786 199
alex@15842 200 /* for foreign domain p2m table mapping */
alex@15842 201 struct foreign_p2m foreign_p2m;
alex@15842 202
awilliam@10567 203 struct last_vcpu last_vcpu[NR_CPUS];
awilliam@11269 204
alex@15481 205 struct opt_feature opt_feature;
alex@15481 206
alex@15893 207 /* Debugging flags. See arch-ia64.h for bits definition. */
alex@15893 208 unsigned int debug_flags;
alex@15893 209
alex@15893 210 /* Reason of debugging break. */
alex@15893 211 unsigned int debug_event;
alex@15893 212
awilliam@11808 213 #ifdef CONFIG_XEN_IA64_TLB_TRACK
awilliam@11808 214 struct tlb_track* tlb_track;
awilliam@11808 215 #endif
kfraser@15826 216
kfraser@15826 217 /* for domctl_destroy_domain continuation */
alex@16191 218 enum {
alex@16191 219 RELRES_not_started,
alex@16191 220 RELRES_mm_teardown,
alex@16191 221 RELRES_xen,
alex@16191 222 RELRES_dom,
alex@16191 223 RELRES_done,
alex@16191 224 } relres;
alex@16191 225 /* Continuable mm_teardown() */
kfraser@15826 226 unsigned long mm_teardown_offset;
alex@16191 227 /* Continuable domain_relinquish_resources() */
yamahata@19202 228 struct page_list_head relmem_list;
iap10@3833 229 };
awilliam@9479 230 #define INT_ENABLE_OFFSET(v) \
awilliam@9479 231 (sizeof(vcpu_info_t) * (v)->vcpu_id + \
awilliam@9479 232 offsetof(vcpu_info_t, evtchn_upcall_mask))
iap10@3833 233
awilliam@11745 234 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
awilliam@11745 235 #define HAS_PERVCPU_VHPT(d) ((d)->arch.has_pervcpu_vhpt)
awilliam@11745 236 #else
awilliam@11745 237 #define HAS_PERVCPU_VHPT(d) (0)
awilliam@11745 238 #endif
awilliam@11745 239
awilliam@11745 240
kaf24@5289 241 struct arch_vcpu {
awilliam@10570 242 /* Save the state of vcpu.
awilliam@10570 243 This is the first entry to speed up accesses. */
awilliam@10570 244 mapped_regs_t *privregs;
awilliam@9770 245
awilliam@10570 246 /* TR and TC. */
awilliam@10570 247 TR_ENTRY itrs[NITRS];
awilliam@10570 248 TR_ENTRY dtrs[NDTRS];
awilliam@10570 249 TR_ENTRY itlb;
awilliam@10570 250 TR_ENTRY dtlb;
awilliam@10570 251
awilliam@10570 252 /* Bit is set if there is a tr/tc for the region. */
awilliam@10570 253 unsigned char itr_regions;
awilliam@10570 254 unsigned char dtr_regions;
awilliam@10570 255 unsigned char tc_regions;
awilliam@10570 256
awilliam@10570 257 unsigned long irr[4]; /* Interrupt request register. */
awilliam@10570 258 unsigned long insvc[4]; /* Interrupt in service. */
awilliam@10570 259 unsigned long iva;
awilliam@10570 260 unsigned long domain_itm;
awilliam@10570 261 unsigned long domain_itm_last;
awilliam@10570 262
awilliam@10140 263 unsigned long event_callback_ip; // event callback handler
awilliam@10140 264 unsigned long failsafe_callback_ip; // Do we need it?
awilliam@9770 265
awilliam@9770 266 /* These fields are copied from arch_domain to make access easier/faster
awilliam@9770 267 in assembly code. */
alex@15898 268 unsigned long metaphysical_rid_dt; // from arch_domain (so is pinned)
alex@15898 269 unsigned long metaphysical_rid_d; // from arch_domain (so is pinned)
awilliam@8831 270 unsigned long metaphysical_saved_rr0; // from arch_domain (so is pinned)
awilliam@8831 271 unsigned long metaphysical_saved_rr4; // from arch_domain (so is pinned)
awilliam@13469 272 unsigned long fp_psr; // used for lazy float register
alex@16784 273 u64 *shadow_bitmap; // from arch_domain (so is pinned)
djm@5264 274 int breakimm; // from arch_domain (so is pinned)
djm@5526 275 int starting_rid; /* first RID assigned to domain */
djm@5526 276 int ending_rid; /* one beyond highest RID assigned to domain */
alex@16782 277 unsigned char rid_bits; // from arch_domain (so is pinned)
awilliam@9770 278
alex@15414 279 /* Bitset for debug register use. */
alex@15414 280 unsigned int dbg_used;
alex@15414 281 u64 dbr[IA64_NUM_DBG_REGS];
alex@15414 282 u64 ibr[IA64_NUM_DBG_REGS];
alex@15414 283
djm@4806 284 struct thread_struct _thread; // this must be last
fred@5986 285
awilliam@9765 286 thash_cb_t vtlb;
awilliam@9765 287 thash_cb_t vhpt;
djm@5797 288 char irq_new_pending;
djm@5797 289 char irq_new_condition; // vpsr.i/vtpr change, check for pending VHPI
fred@5949 290 char hypercall_continuation;
awilliam@11039 291
yamahata@18875 292 fpswa_ret_t fpswa_ret; /* save return values of FPSWA emulation */
awilliam@11268 293 struct timer hlt_timer;
adsharma@4993 294 struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
awilliam@10571 295
alex@15664 296 /* This vector hosts the protection keys for pkr emulation of PV domains.
alex@15664 297 * Currently only 15 registers are usable by domU's. pkr[15] is
alex@15664 298 * reserved for the hypervisor. */
alex@15664 299 unsigned long pkrs[XEN_IA64_NPKRS+1]; /* protection key registers */
alex@15664 300 #define XEN_IA64_PKR_IN_USE 0x1 /* If psr.pk = 1 was set. */
alex@15664 301 unsigned char pkr_flags;
alex@15664 302
alex@15726 303 unsigned char vhpt_pg_shift; /* PAGE_SHIFT or less */
awilliam@11745 304 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
awilliam@11745 305 PTA pta;
awilliam@11745 306 unsigned long vhpt_maddr;
awilliam@11745 307 struct page_info* vhpt_page;
awilliam@11745 308 unsigned long vhpt_entries;
awilliam@11745 309 #endif
awilliam@10571 310 #define INVALID_PROCESSOR INT_MAX
awilliam@10571 311 int last_processor;
awilliam@13905 312 cpumask_t cache_coherent_map;
iap10@3833 313 };
djm@4806 314
iap10@3833 315 #include <asm/uaccess.h> /* for KERNEL_DS */
iap10@3833 316 #include <asm/pgtable.h>
iap10@3833 317
awilliam@12629 318 int
awilliam@12629 319 do_perfmon_op(unsigned long cmd,
awilliam@12629 320 XEN_GUEST_HANDLE(void) arg1, unsigned long arg2);
awilliam@12629 321
alex@16759 322 void
alex@16759 323 ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
alex@16759 324 unsigned long iim, unsigned long itir, unsigned long arg5,
alex@16759 325 unsigned long arg6, unsigned long arg7, unsigned long stack);
alex@16759 326
yamahata@18873 327 void
yamahata@18873 328 ia64_lazy_load_fpu(struct vcpu *vcpu);
yamahata@18873 329
keir@19081 330 int construct_dom0(
keir@19081 331 struct domain *d,
keir@19081 332 unsigned long image_start, unsigned long image_len,
keir@19081 333 unsigned long initrd_start, unsigned long initrd_len,
keir@19081 334 char *cmdline);
keir@19081 335
iap10@3833 336 #endif /* __ASM_DOMAIN_H__ */
kaf24@3914 337
kaf24@3914 338 /*
kaf24@3914 339 * Local variables:
kaf24@3914 340 * mode: C
kaf24@3914 341 * c-set-style: "BSD"
kaf24@3914 342 * c-basic-offset: 4
kaf24@3914 343 * tab-width: 4
kaf24@3914 344 * indent-tabs-mode: nil
kaf24@3988 345 * End:
kaf24@3914 346 */