ia64/xen-unstable

annotate xen/include/asm-ia64/vmx_vcpu.h @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use domain::max_vcpus (d->max_vcpus) instead.
The changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 6607624285b2
children
rev   line source
awilliam@11817 1 /* -*- Mode:C; c-basic-offset:8; tab-width:8; indent-tabs-mode:nil -*- */
adsharma@4993 2 /*
adsharma@4993 3 * vmx_vcpu.h:
adsharma@4993 4 * Copyright (c) 2005, Intel Corporation.
adsharma@4993 5 *
adsharma@4993 6 * This program is free software; you can redistribute it and/or modify it
adsharma@4993 7 * under the terms and conditions of the GNU General Public License,
adsharma@4993 8 * version 2, as published by the Free Software Foundation.
adsharma@4993 9 *
adsharma@4993 10 * This program is distributed in the hope it will be useful, but WITHOUT
adsharma@4993 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
adsharma@4993 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
adsharma@4993 13 * more details.
adsharma@4993 14 *
adsharma@4993 15 * You should have received a copy of the GNU General Public License along with
adsharma@4993 16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
adsharma@4993 17 * Place - Suite 330, Boston, MA 02111-1307 USA.
adsharma@4993 18 *
adsharma@4993 19 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
adsharma@4993 20 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
adsharma@4993 21 */
adsharma@4993 22
adsharma@4993 23 #ifndef _XEN_IA64_VMX_VCPU_H
adsharma@4993 24 #define _XEN_IA64_VMX_VCPU_H
adsharma@4993 25
adsharma@4993 26 #include <xen/sched.h>
adsharma@4993 27 #include <asm/ia64_int.h>
adsharma@4993 28 #include <asm/vmx_vpd.h>
adsharma@4993 29 #include <asm/ptrace.h>
adsharma@4993 30 #include <asm/regs.h>
adsharma@4993 31 #include <asm/regionreg.h>
adsharma@4993 32 #include <asm/types.h>
adsharma@4993 33 #include <asm/vcpu.h>
adsharma@4993 34
/*
 * Virtual region numbers: bits 63:61 of a virtual address select one of
 * eight regions (VRN0-VRN7).
 */
#define VRN_SHIFT	61
#define VRN0	0x0UL
#define VRN1	0x1UL
#define VRN2	0x2UL
#define VRN3	0x3UL
#define VRN4	0x4UL
#define VRN5	0x5UL
#define VRN6	0x6UL
#define VRN7	0x7UL

/* for vlsapic: in-service interrupt bitmap word i of this vcpu's
 * virtual local SAPIC. */
#define VLSAPIC_INSVC(vcpu, i)	((vcpu)->arch.insvc[i])

/* Shorthand accessor for field y of the per-vcpu VMX state. */
#define VMX(x,y)	((x)->arch.arch_vmx.y)

/* Mask covering the low VMM_RR_SHIFT bits of a region register value. */
#define VMM_RR_SHIFT	20
#define VMM_RR_MASK	((1UL<<VMM_RR_SHIFT)-1)
awilliam@8917 51
/* Masking/checking helpers for virtualised control and indirect register
 * writes: ignored-field masks and reserved-field validation. */
extern u64 indirect_reg_igfld_MASK(int type, int index, u64 value);
extern u64 cr_igfld_mask(int index, u64 value);
extern int check_indirect_reg_rsv_fields(int type, int index, u64 value);
/* ISR (interruption status register) construction helpers used by the
 * fault-injection routines at the bottom of this header. */
extern u64 set_isr_ei_ni(VCPU * vcpu);
extern u64 set_isr_for_na_inst(VCPU * vcpu, int op);
extern void set_illegal_op_isr (VCPU *vcpu);

/* next all for VTI domain APIs definition */
/* PSR/RR/PKR accessors and the TLB insert/purge family (itc/itr/ptr/ptc). */
extern void vmx_vcpu_set_psr(VCPU * vcpu, unsigned long value);
extern IA64FAULT vmx_vcpu_cover(VCPU * vcpu);
extern IA64FAULT vmx_vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val);
extern u64 vmx_vcpu_get_pkr(VCPU * vcpu, u64 reg);
IA64FAULT vmx_vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val);
extern IA64FAULT vmx_vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
extern IA64FAULT vmx_vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
extern IA64FAULT vmx_vcpu_itr_i(VCPU * vcpu, u64 slot, u64 pte, u64 itir,
                                u64 ifa);
extern IA64FAULT vmx_vcpu_itr_d(VCPU * vcpu, u64 slot, u64 pte, u64 itir,
                                u64 ifa);
extern IA64FAULT vmx_vcpu_ptr_d(VCPU * vcpu, u64 vadr, u64 ps);
extern IA64FAULT vmx_vcpu_ptr_i(VCPU * vcpu, u64 vadr, u64 ps);
extern IA64FAULT vmx_vcpu_ptc_l(VCPU * vcpu, u64 vadr, u64 ps);
extern IA64FAULT vmx_vcpu_ptc_e(VCPU * vcpu, u64 vadr);
extern IA64FAULT vmx_vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 ps);
extern IA64FAULT vmx_vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 ps);
/* VHPT hash/tag computation and address translation helpers. */
extern u64 vmx_vcpu_thash(VCPU * vcpu, u64 vadr);
extern u64 vmx_vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa);
extern u64 vmx_vcpu_ttag(VCPU * vcpu, u64 vadr);
extern IA64FAULT vmx_vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr);
extern u64 vmx_vcpu_tak(VCPU * vcpu, u64 vadr);
extern IA64FAULT vmx_vcpu_rfi(VCPU * vcpu);
extern u64 vmx_vcpu_get_psr(VCPU * vcpu);
/* Banked general register access (registers r16-r31, bank 0/1). */
extern IA64FAULT vmx_vcpu_get_bgr(VCPU * vcpu, unsigned int reg, u64 * val);
extern IA64FAULT vmx_vcpu_set_bgr(VCPU * vcpu, unsigned int reg, u64 val,
                                  int nat);
#if 0
extern IA64FAULT vmx_vcpu_get_gr(VCPU * vcpu, unsigned reg, u64 * val);
extern IA64FAULT vmx_vcpu_set_gr(VCPU * vcpu, unsigned reg, u64 value, int nat);
#endif
extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU * vcpu, u64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU * vcpu, u64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_l(VCPU * vcpu, u64 val);
/* Virtual interval timer (vtm) management. */
extern void vtm_init(VCPU * vcpu);
extern uint64_t vtm_get_itc(VCPU * vcpu);
extern void vtm_set_itc(VCPU * vcpu, uint64_t new_itc);
extern void vtm_set_itv(VCPU * vcpu, uint64_t val);
extern void vtm_set_itm(VCPU * vcpu, uint64_t val);
/* Virtual local SAPIC: pending-interrupt bookkeeping and delivery. */
extern void vlsapic_reset(VCPU * vcpu);
extern int vmx_check_pending_irq(VCPU * vcpu);
extern void guest_write_eoi(VCPU * vcpu);
extern int is_unmasked_irq(VCPU * vcpu);
extern uint64_t guest_read_vivr(VCPU * vcpu);
extern int vmx_vcpu_pend_interrupt(VCPU * vcpu, uint8_t vector);
extern void vcpu_load_kernel_regs(VCPU * vcpu);
/* Region register 7 switching helpers. */
extern void __vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
                             void *shared_arch_info);
extern void __vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid);
extern void vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid);
extern void vmx_ia64_set_dcr(VCPU * v);
extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
/* Assembly implementations of guest register-bank switching. */
extern void vmx_asm_bsw0(void);
extern void vmx_asm_bsw1(void);
awilliam@9012 114
adsharma@4993 115 /**************************************************************************
adsharma@4993 116 VCPU control register access routines
adsharma@4993 117 **************************************************************************/
adsharma@4993 118
/* Read the virtual cr.itm (interval timer match) from the shadowed state. */
static inline u64 vmx_vcpu_get_itm(VCPU * vcpu)
{
	return ((u64)VCPU(vcpu, itm));
}

/* Read the virtual cr.iva (interruption vector address). */
static inline u64 vmx_vcpu_get_iva(VCPU * vcpu)
{
	return ((u64)VCPU(vcpu, iva));
}

/* Read the virtual cr.pta (page table address). */
static inline u64 vmx_vcpu_get_pta(VCPU * vcpu)
{
	return ((u64)VCPU(vcpu, pta));
}

/* Read the virtual cr.lid (local SAPIC id). */
static inline u64 vmx_vcpu_get_lid(VCPU * vcpu)
{
	return ((u64)VCPU(vcpu, lid));
}

/* Read the virtual cr.ivr: delegated to the vlsapic, since reading IVR
 * has delivery side effects (see guest_read_vivr). */
static inline u64 vmx_vcpu_get_ivr(VCPU * vcpu)
{
	return ((u64)guest_read_vivr(vcpu));
}

/* Read the virtual cr.tpr (task priority). */
static inline u64 vmx_vcpu_get_tpr(VCPU * vcpu)
{
	return ((u64)VCPU(vcpu, tpr));
}

static inline u64 vmx_vcpu_get_eoi(VCPU * vcpu)
{
	return (0UL);		// reads of eoi always return 0
}

/* Read word 0 (vectors 0-63) of the virtual IRR. */
static inline u64 vmx_vcpu_get_irr0(VCPU * vcpu)
{
	return ((u64)VCPU(vcpu, irr[0]));
}

/* Read word 1 (vectors 64-127) of the virtual IRR. */
static inline u64 vmx_vcpu_get_irr1(VCPU * vcpu)
{
	return ((u64)VCPU(vcpu, irr[1]));
}

/* Read word 2 (vectors 128-191) of the virtual IRR. */
static inline u64 vmx_vcpu_get_irr2(VCPU * vcpu)
{
	return ((u64)VCPU(vcpu, irr[2]));
}

/* Read word 3 (vectors 192-255) of the virtual IRR. */
static inline u64 vmx_vcpu_get_irr3(VCPU * vcpu)
{
	return ((u64)VCPU(vcpu, irr[3]));
}

/* Read the virtual cr.itv (interval timer vector). */
static inline u64 vmx_vcpu_get_itv(VCPU * vcpu)
{
	return ((u64)VCPU(vcpu, itv));
}

/* Read the virtual cr.pmv (performance monitoring vector). */
static inline u64 vmx_vcpu_get_pmv(VCPU * vcpu)
{
	return ((u64)VCPU(vcpu, pmv));
}

/* Read the virtual cr.cmcv (corrected machine check vector). */
static inline u64 vmx_vcpu_get_cmcv(VCPU * vcpu)
{
	return ((u64)VCPU(vcpu, cmcv));
}

/* Read the virtual cr.lrr0 (local redirection register 0). */
static inline u64 vmx_vcpu_get_lrr0(VCPU * vcpu)
{
	return ((u64)VCPU(vcpu, lrr0));
}

/* Read the virtual cr.lrr1 (local redirection register 1). */
static inline u64 vmx_vcpu_get_lrr1(VCPU * vcpu)
{
	return ((u64)VCPU(vcpu, lrr1));
}
awilliam@11817 198
/* Write the virtual cr.itm: routed through the virtual timer so the
 * emulated interval timer can be reprogrammed. */
static inline IA64FAULT vmx_vcpu_set_itm(VCPU * vcpu, u64 val)
{
	vtm_set_itm(vcpu, val);
	return IA64_NO_FAULT;
}

/* Write the virtual cr.iva (interruption vector address). */
static inline IA64FAULT vmx_vcpu_set_iva(VCPU * vcpu, u64 val)
{
	VCPU(vcpu, iva) = val;
	return IA64_NO_FAULT;
}

/* Write the virtual cr.pta (page table address). */
static inline IA64FAULT vmx_vcpu_set_pta(VCPU * vcpu, u64 val)
{
	VCPU(vcpu, pta) = val;
	return IA64_NO_FAULT;
}

/* Write the virtual cr.lid (local SAPIC id). */
static inline IA64FAULT vmx_vcpu_set_lid(VCPU * vcpu, u64 val)
{
	VCPU(vcpu, lid) = val;
	return IA64_NO_FAULT;
}

/* cr.tpr writes can unmask pending interrupts, so they are handled
 * out of line. */
extern IA64FAULT vmx_vcpu_set_tpr(VCPU * vcpu, u64 val);

/* Write the virtual cr.eoi: the written value is ignored; the vlsapic
 * retires the highest in-service interrupt. */
static inline IA64FAULT vmx_vcpu_set_eoi(VCPU * vcpu, u64 val)
{
	guest_write_eoi(vcpu);
	return IA64_NO_FAULT;
}

/* Write the virtual cr.itv: routed through the virtual timer. */
static inline IA64FAULT vmx_vcpu_set_itv(VCPU * vcpu, u64 val)
{
	vtm_set_itv(vcpu, val);
	return IA64_NO_FAULT;
}

/* Write the virtual cr.pmv (performance monitoring vector). */
static inline IA64FAULT vmx_vcpu_set_pmv(VCPU * vcpu, u64 val)
{
	VCPU(vcpu, pmv) = val;
	return IA64_NO_FAULT;
}

/* Write the virtual cr.cmcv (corrected machine check vector). */
static inline IA64FAULT vmx_vcpu_set_cmcv(VCPU * vcpu, u64 val)
{
	VCPU(vcpu, cmcv) = val;
	return IA64_NO_FAULT;
}

/* Write the virtual cr.lrr0 (local redirection register 0). */
static inline IA64FAULT vmx_vcpu_set_lrr0(VCPU * vcpu, u64 val)
{
	VCPU(vcpu, lrr0) = val;
	return IA64_NO_FAULT;
}

/* Write the virtual cr.lrr1 (local redirection register 1). */
static inline IA64FAULT vmx_vcpu_set_lrr1(VCPU * vcpu, u64 val)
{
	VCPU(vcpu, lrr1) = val;
	return IA64_NO_FAULT;
}
adsharma@4993 260
adsharma@4993 261 /**************************************************************************
adsharma@4993 262 VCPU privileged application register access routines
adsharma@4993 263 **************************************************************************/
/* Guest writes to ar.itc are virtualised through the vtm so the guest
 * sees its own time base rather than the machine cycle counter. */
static inline IA64FAULT vmx_vcpu_set_itc(VCPU * vcpu, u64 val)
{
	vtm_set_itc(vcpu, val);
	return IA64_NO_FAULT;
}

/* Read the guest's virtualised ar.itc. */
static inline u64 vmx_vcpu_get_itc(VCPU * vcpu)
{
	return ((u64)vtm_get_itc(vcpu));
}
awilliam@11817 274
awilliam@9164 275 /*
adsharma@4993 276 static inline
awilliam@11817 277 IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, u64 reg, u64 *pval)
adsharma@4993 278 {
adsharma@4993 279 *pval = VMX(vcpu,vrr[reg>>61]);
awilliam@11817 280 return IA64_NO_FAULT;
adsharma@4993 281 }
awilliam@9164 282 */
adsharma@4993 283 /**************************************************************************
adsharma@4993 284 VCPU debug breakpoint register access routines
adsharma@4993 285 **************************************************************************/
adsharma@4993 286
/* Read virtual CPUID[reg]. The guest is shown five CPUID registers
 * (indices 0-4); anything beyond that kills the domain. */
static inline u64 vmx_vcpu_get_cpuid(VCPU * vcpu, u64 reg)
{
	// TODO: out-of-range accesses should raise a reserved register/field
	//       fault instead of panicking the domain (original TODO said
	//       "DBRs" here — apparent copy/paste; this is the CPUID getter)
	// TODO: Should set Logical CPU state, not just physical
	if (reg > 4) {
		panic_domain(vcpu_regs(vcpu),
		             "there are only five cpuid registers");
	}
	return ((u64)VCPU(vcpu, vcpuid[reg]));
}

/* Debug breakpoint / instruction breakpoint registers are delegated to
 * the common (non-VMX) vcpu implementations. */
static inline IA64FAULT vmx_vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
{
	return vcpu_set_dbr(vcpu, reg, val);
}

static inline IA64FAULT vmx_vcpu_set_ibr(VCPU * vcpu, u64 reg, u64 val)
{
	return vcpu_set_ibr(vcpu, reg, val);
}

static inline IA64FAULT vmx_vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 *pval)
{
	return vcpu_get_dbr(vcpu, reg, pval);
}

static inline IA64FAULT vmx_vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 *pval)
{
	return vcpu_get_ibr(vcpu, reg, pval);
}
adsharma@4993 317
adsharma@4993 318 /**************************************************************************
adsharma@4993 319 VCPU performance monitor register access routines
adsharma@4993 320 **************************************************************************/
/* Write performance monitor configuration register 'reg'. Goes straight
 * to the physical PMC — see TODO about logical CPU state. */
static inline IA64FAULT vmx_vcpu_set_pmc(VCPU * vcpu, u64 reg, u64 val)
{
	// TODO: Should set Logical CPU state, not just physical
	// NOTE: Writes to unimplemented PMC registers are discarded
	ia64_set_pmc(reg, val);
	return IA64_NO_FAULT;
}

/* Write performance monitor data register 'reg' (physical access). */
static inline IA64FAULT vmx_vcpu_set_pmd(VCPU * vcpu, u64 reg, u64 val)
{
	// TODO: Should set Logical CPU state, not just physical
	// NOTE: Writes to unimplemented PMD registers are discarded
	ia64_set_pmd(reg, val);
	return IA64_NO_FAULT;
}

/* Read performance monitor configuration register 'reg' (physical). */
static inline u64 vmx_vcpu_get_pmc(VCPU * vcpu, u64 reg)
{
	// NOTE: Reads from unimplemented PMC registers return zero
	return ((u64)ia64_get_pmc(reg));
}

/* Read performance monitor data register 'reg' (physical). */
static inline u64 vmx_vcpu_get_pmd(VCPU * vcpu, u64 reg)
{
	// NOTE: Reads from unimplemented PMD registers return zero
	return ((u64)ia64_get_pmd(reg));
}
adsharma@4993 348
adsharma@4993 349 /**************************************************************************
adsharma@4993 350 VCPU banked general register access routines
adsharma@4993 351 **************************************************************************/
#if 0
/* Compiled out — presumably superseded by the assembly implementations
 * vmx_asm_bsw0/vmx_asm_bsw1 declared above; TODO confirm before reviving. */
static inline IA64FAULT vmx_vcpu_bsw0(VCPU * vcpu)
{
	/* Select register bank 0 by clearing vPSR.bn. */
	VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
	return IA64_NO_FAULT;
}

static inline IA64FAULT vmx_vcpu_bsw1(VCPU * vcpu)
{
	/* Select register bank 1 by setting vPSR.bn. */
	VCPU(vcpu, vpsr) |= IA64_PSR_BN;
	return IA64_NO_FAULT;
}
#endif
#if 0
/* Another hash performance algorithm */
#define redistribute_rid(rid) (((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
#endif
/*
 * Convert a guest (virtual) region register value to the machine value
 * actually loaded: offset the guest RID by the domain's starting RID,
 * clamp the page size to the hypervisor's PAGE_SHIFT, force the ve bit
 * on, then mangle the RID (vmMangleRID).
 */
static inline unsigned long vrrtomrr(VCPU * v, unsigned long val)
{
	ia64_rr rr;

	rr.rrval = val;
	rr.rid = rr.rid + v->arch.starting_rid;
	if (rr.ps > PAGE_SHIFT)
		rr.ps = PAGE_SHIFT;
	rr.ve = 1;
	return vmMangleRID(rr.rrval);
/* Disable this rid allocation algorithm for now */
#if 0
	/* NOTE(review): dead code — compiled out AND unreachable after the
	 * return above; also references 'rid'/'vcpu' not declared here. */
	rid = (((u64) vcpu->domain->domain_id) << DOMAIN_RID_SHIFT) + rr.rid;
	rr.rid = redistribute_rid(rid);
#endif

}
/* Per-vcpu virtual TLB control block. */
static inline thash_cb_t *vmx_vcpu_get_vtlb(VCPU * vcpu)
{
	return &vcpu->arch.vtlb;
}

/* Per-vcpu VHPT control block.
 * NOTE(review): lacks the vmx_ prefix of its sibling — kept for callers. */
static inline thash_cb_t *vcpu_get_vhpt(VCPU * vcpu)
{
	return &vcpu->arch.vhpt;
}
kaf24@8386 397
alex@17068 398
alex@17068 399 /**************************************************************************
alex@17068 400 VCPU fault injection routines
alex@17068 401 **************************************************************************/
alex@17068 402
alex@17068 403 /*
alex@17068 404 * Set vIFA & vITIR & vIHA, when vPSR.ic =1
alex@17068 405 * Parameter:
alex@17068 406 * set_ifa: if true, set vIFA
alex@17068 407 * set_itir: if true, set vITIR
alex@17068 408 * set_iha: if true, set vIHA
alex@17068 409 */
alex@17068 410 static inline void
alex@17068 411 set_ifa_itir_iha (VCPU *vcpu, u64 vadr,
alex@17068 412 int set_ifa, int set_itir, int set_iha)
alex@17068 413 {
alex@17068 414 IA64_PSR vpsr;
alex@17068 415 u64 value;
alex@17068 416 vpsr.val = VCPU(vcpu, vpsr);
alex@17068 417 /* Vol2, Table 8-1 */
alex@17068 418 if (vpsr.ic) {
alex@17068 419 if (set_ifa){
alex@17068 420 vcpu_set_ifa(vcpu, vadr);
alex@17068 421 }
alex@17068 422 if (set_itir) {
alex@17068 423 value = vmx_vcpu_get_itir_on_fault(vcpu, vadr);
alex@17068 424 vcpu_set_itir(vcpu, value);
alex@17068 425 }
alex@17068 426 if (set_iha) {
alex@17068 427 value = vmx_vcpu_thash(vcpu, vadr);
alex@17068 428 vcpu_set_iha(vcpu, value);
alex@17068 429 }
alex@17068 430 }
alex@17068 431 }
alex@17068 432
/*
 * Data TLB Fault
 * @ Data TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
dtlb_fault (VCPU *vcpu, u64 vadr)
{
	/* If vPSR.ic: latch IFA, ITIR, IHA, then inject. */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
	inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR);
}

/*
 * Instruction TLB Fault
 * @ Instruction TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
itlb_fault (VCPU *vcpu, u64 vadr)
{
	/* If vPSR.ic: latch IFA, ITIR, IHA, then inject. */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
	inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
}

/*
 * Data Nested TLB Fault
 * @ Data Nested TLB Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 * No fault parameters are latched for the nested case.
 */
static inline void
nested_dtlb (VCPU *vcpu)
{
	inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR);
}

/*
 * Alternate Data TLB Fault
 * @ Alternate Data TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
alt_dtlb (VCPU *vcpu, u64 vadr)
{
	/* If vPSR.ic: latch IFA and ITIR (no IHA for the alternate vector). */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
	inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
}

/*
 * Alternate Instruction TLB Fault
 * @ Alternate Instruction TLB vector
 * (header previously said "Data TLB Fault" — copy/paste error)
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
alt_itlb (VCPU *vcpu, u64 vadr)
{
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
	inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR);
}

/*
 * Deal with:
 *  VHPT Translation Vector
 * Common helper for both the instruction and data VHPT faults below.
 */
static inline void
_vhpt_fault(VCPU *vcpu, u64 vadr)
{
	/* If vPSR.ic: latch IFA, ITIR, IHA, then inject. */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
	inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
}

/*
 * VHPT Instruction Fault
 * @ VHPT Translation vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
ivhpt_fault (VCPU *vcpu, u64 vadr)
{
	_vhpt_fault(vcpu, vadr);
}

/*
 * VHPT Data Fault
 * @ VHPT Translation vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
dvhpt_fault (VCPU *vcpu, u64 vadr)
{
	_vhpt_fault(vcpu, vadr);
}
alex@17068 527
/*
 * Deal with:
 *  General Exception vector
 * Common helper: inject with no extra fault parameters; callers that
 * need an ISR code set it before calling (see unimpl_daddr).
 */
static inline void
_general_exception (VCPU *vcpu)
{
	inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
}

/*
 * Illegal Operation Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
illegal_op (VCPU *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Illegal Dependency Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
illegal_dep (VCPU *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Reserved Register/Field Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
rsv_reg_field (VCPU *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Privileged Operation Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
privilege_op (VCPU *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Unimplemented Data Address Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
unimpl_daddr (VCPU *vcpu)
{
	ISR isr;

	/* Build an ISR identifying the fault before injecting. */
	isr.val = set_isr_ei_ni(vcpu);
	isr.code = IA64_UNIMPL_DADDR_FAULT;
	vcpu_set_isr(vcpu, isr.val);
	_general_exception(vcpu);
}

/*
 * Privileged Register Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
privilege_reg (VCPU *vcpu)
{
	_general_exception(vcpu);
}
alex@17068 608
/*
 * Deal with
 *  Nat consumption vector
 * Parameter:
 *  vadr: only meaningful when t is DATA or INSTRUCTION; pass 0 for
 *        t == REGISTER.
 */
static inline void
_nat_consumption_fault(VCPU *vcpu, u64 vadr, miss_type t)
{
	/* If vPSR.ic && t == DATA/INST: latch IFA (no ITIR/IHA). */
	if ( t == DATA || t == INSTRUCTION ) {
		/* IFA */
		set_ifa_itir_iha(vcpu, vadr, 1, 0, 0);
	}

	inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR);
}

/*
 * IR Data Nat Page Consumption Fault
 * @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
#if 0
static inline void
ir_nat_page_consumption (VCPU *vcpu, u64 vadr)
{
	_nat_consumption_fault(vcpu, vadr, DATA);
}
#endif //shadow it due to no use currently

/*
 * Instruction Nat Page Consumption Fault
 * @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
inat_page_consumption (VCPU *vcpu, u64 vadr)
{
	_nat_consumption_fault(vcpu, vadr, INSTRUCTION);
}

/*
 * Register Nat Consumption Fault
 * @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
rnat_consumption (VCPU *vcpu)
{
	_nat_consumption_fault(vcpu, 0, REGISTER);
}

/*
 * Data Nat Page Consumption Fault
 * @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
static inline void
dnat_page_consumption (VCPU *vcpu, uint64_t vadr)
{
	_nat_consumption_fault(vcpu, vadr, DATA);
}
alex@17068 672
/*
 * Deal with
 *  Page not present vector
 * Common helper for the instruction/data wrappers below.
 */
static inline void
__page_not_present(VCPU *vcpu, u64 vadr)
{
	/* If vPSR.ic: latch IFA and ITIR (no IHA). */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
	inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
}

static inline void
data_page_not_present(VCPU *vcpu, u64 vadr)
{
	__page_not_present(vcpu, vadr);
}

static inline void
inst_page_not_present(VCPU *vcpu, u64 vadr)
{
	__page_not_present(vcpu, vadr);
}

/*
 * Deal with
 *  Data access rights vector
 */
static inline void
data_access_rights(VCPU *vcpu, u64 vadr)
{
	/* If vPSR.ic: latch IFA and ITIR, then inject. */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
	inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
}
alex@17209 708
/*
 * Unimplemented Instruction Address Trap
 * @ Lower-Privilege Transfer Trap Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 * Sets both ISR (trap code) and IFA before injecting.
 */
static inline void
unimpl_iaddr_trap (VCPU *vcpu, u64 vadr)
{
	ISR isr;

	isr.val = set_isr_ei_ni(vcpu);
	isr.code = IA64_UNIMPL_IADDR_TRAP;
	vcpu_set_isr(vcpu, isr.val);
	vcpu_set_ifa(vcpu, vadr);
	inject_guest_interruption(vcpu, IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR);
}
adsharma@4993 725 #endif