ia64/xen-unstable

annotate xen/arch/ia64/xen/vcpu.c @ 10692:306d7857928c

[IA64] Save & restore.

xc_ia64_linux_save.c and xc_ia64_linux_restore.c added.
vcpu context has more registers and state (e.g. tr registers).
Per cpu irqs are deallocated when cpu is switched off.
#if/#endif added in reboot.c for ia64.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Tue Jul 11 12:51:18 2006 -0600 (2006-07-11)
parents bdc0258e162a
children 000789c36d28
rev   line source
djm@6458 1 /*
djm@6458 2 * Virtualized CPU functions
djm@6458 3 *
djm@6468 4 * Copyright (C) 2004-2005 Hewlett-Packard Co.
djm@6458 5 * Dan Magenheimer (dan.magenheimer@hp.com)
djm@6458 6 *
djm@6458 7 */
djm@6458 8
djm@6458 9 #include <linux/sched.h>
djm@6458 10 #include <public/arch-ia64.h>
djm@6458 11 #include <asm/ia64_int.h>
djm@6458 12 #include <asm/vcpu.h>
djm@6458 13 #include <asm/regionreg.h>
djm@6458 14 #include <asm/tlb.h>
djm@6458 15 #include <asm/processor.h>
djm@6458 16 #include <asm/delay.h>
djm@6458 17 #include <asm/vmx_vcpu.h>
awilliam@9005 18 #include <asm/vhpt.h>
awilliam@9005 19 #include <asm/tlbflush.h>
awilliam@10236 20 #include <asm/privop.h>
djm@6868 21 #include <xen/event.h>
awilliam@10204 22 #include <asm/vmx_phy_mode.h>
awilliam@10561 23 #include <asm/bundle.h>
awilliam@10561 24 #include <asm/privop_stat.h>
djm@6458 25
awilliam@9005 26 /* FIXME: where should these declarations live? */
awilliam@9005 27 extern void getreg(unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs);
awilliam@9005 28 extern void setreg(unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs);
awilliam@9742 29 extern void getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs);
awilliam@9742 30
awilliam@9764 31 extern void setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs);
awilliam@9764 32
awilliam@9005 33 extern void panic_domain(struct pt_regs *, const char *, ...);
awilliam@10236 34 extern IA64_BUNDLE __get_domain_bundle(UINT64);
awilliam@9005 35
djm@6458 36 typedef union {
djm@6458 37 struct ia64_psr ia64_psr;
djm@6458 38 unsigned long i64;
djm@6458 39 } PSR;
djm@6458 40
djm@6458 41 // this def for vcpu_regs won't work if kernel stack is present
djm@6867 42 //#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
djm@6458 43
djm@6458 44 #define TRUE 1
djm@6458 45 #define FALSE 0
djm@6458 46 #define IA64_PTA_SZ_BIT 2
djm@6458 47 #define IA64_PTA_VF_BIT 8
djm@6458 48 #define IA64_PTA_BASE_BIT 15
djm@6458 49 #define IA64_PTA_LFMT (1UL << IA64_PTA_VF_BIT)
djm@6458 50 #define IA64_PTA_SZ(x) (x##UL << IA64_PTA_SZ_BIT)
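
/* Worked example of the PTA fields above (architected layout: ve at
 * bit 0, size in bits 2..7, base from bit 15 up): a 64KB short-format
 * VHPT based at 0x8000000000000000 would be programmed as
 *	pta = 0x8000000000000000UL | IA64_PTA_SZ(16) | 1;	// 1 = pta.ve
 */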
djm@6458 51
djm@6458 52 unsigned long vcpu_verbose = 0;
djm@6458 53
djm@6458 54 /**************************************************************************
djm@6458 55 VCPU general register access routines
djm@6458 56 **************************************************************************/
djm@6867 57 #ifdef XEN
djm@6458 58 UINT64
awilliam@8833 59 vcpu_get_gr(VCPU *vcpu, unsigned long reg)
djm@6458 60 {
djm@6458 61 REGS *regs = vcpu_regs(vcpu);
djm@6458 62 UINT64 val;
awilliam@9314 63
djm@6458 64 if (!reg) return 0;
djm@6458 65 getreg(reg,&val,0,regs); // FIXME: handle NATs later
djm@6458 66 return val;
djm@6458 67 }
djm@6867 68 IA64FAULT
awilliam@8833 69 vcpu_get_gr_nat(VCPU *vcpu, unsigned long reg, UINT64 *val)
djm@6867 70 {
djm@6867 71 REGS *regs = vcpu_regs(vcpu);
awilliam@9314 72 int nat;
awilliam@9314 73
djm@6867 74 getreg(reg,val,&nat,regs); // FIXME: handle NATs later
awilliam@9314 75 if (nat)
awilliam@9314 76 return IA64_NAT_CONSUMPTION_VECTOR;
djm@6867 77 return 0;
djm@6867 78 }
djm@6458 79
djm@6458 80 // returns:
djm@6458 81 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
djm@6458 82 // IA64_NO_FAULT otherwise
djm@6458 83 IA64FAULT
awilliam@8833 84 vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value, int nat)
djm@6867 85 {
djm@6867 86 REGS *regs = vcpu_regs(vcpu);
awilliam@9315 87 long sof = (regs->cr_ifs) & 0x7f;
awilliam@9314 88
djm@6867 89 if (!reg) return IA64_ILLOP_FAULT;
djm@6867 90 if (reg >= sof + 32) return IA64_ILLOP_FAULT;
djm@6867 91 setreg(reg,value,nat,regs); // FIXME: handle NATs later
djm@6867 92 return IA64_NO_FAULT;
djm@6867 93 }
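
/* The frame check above, in isolation (a sketch; gr_is_addressable is
 * hypothetical): cr.ifs bits 0..6 hold sof, the size of the current
 * register-stack frame, so only r1..r31 plus the stacked registers
 * r32..r(31+sof) may be written. r0 always reads as zero.
 */
#if 0
static int gr_is_addressable(unsigned long cr_ifs, unsigned long reg)
{
	unsigned long sof = cr_ifs & 0x7f;	/* size of frame */
	return reg >= 1 && reg < 32 + sof;
}
#endif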
awilliam@9742 94
awilliam@9742 95 IA64FAULT
awilliam@9742 96 vcpu_get_fpreg(VCPU *vcpu, unsigned long reg, struct ia64_fpreg *val)
awilliam@9742 97 {
awilliam@9742 98 REGS *regs = vcpu_regs(vcpu);
awilliam@9742 99 getfpreg(reg,val,regs); // FIXME: handle NATs later
awilliam@9764 100 return IA64_NO_FAULT;
awilliam@9764 101 }
awilliam@9764 102
awilliam@9764 103 IA64FAULT
awilliam@9764 104 vcpu_set_fpreg(VCPU *vcpu, unsigned long reg, struct ia64_fpreg *val)
awilliam@9764 105 {
awilliam@9764 106 REGS *regs = vcpu_regs(vcpu);
awilliam@9764 107 if(reg > 1)
awilliam@9764 108 setfpreg(reg,val,regs); // FIXME: handle NATs later
awilliam@9764 109 return IA64_NO_FAULT;
awilliam@9742 110 }
awilliam@9742 111
djm@6867 112 #else
djm@6867 113 // returns:
djm@6867 114 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
djm@6867 115 // IA64_NO_FAULT otherwise
djm@6867 116 IA64FAULT
awilliam@8833 117 vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value)
djm@6458 118 {
djm@6458 119 REGS *regs = vcpu_regs(vcpu);
djm@6458 120 long sof = (regs->cr_ifs) & 0x7f;
djm@6458 121
djm@6458 122 if (!reg) return IA64_ILLOP_FAULT;
djm@6458 123 if (reg >= sof + 32) return IA64_ILLOP_FAULT;
djm@6458 124 setreg(reg,value,0,regs); // FIXME: handle NATs later
djm@6458 125 return IA64_NO_FAULT;
djm@6458 126 }
djm@6458 127
djm@6867 128 #endif
awilliam@10246 129
awilliam@10246 130 void vcpu_init_regs (struct vcpu *v)
awilliam@10246 131 {
awilliam@10246 132 struct pt_regs *regs;
awilliam@10246 133
awilliam@10246 134 regs = vcpu_regs (v);
awilliam@10246 135 if (VMX_DOMAIN(v)) {
awilliam@10246 136 /* dt/rt/it:1; i/ic:1; si:1; vm/bn:1; ac:1 */
awilliam@10246 137 /* Need to be expanded as macro */
awilliam@10246 138 regs->cr_ipsr = 0x501008826008;
awilliam@10246 139 } else {
awilliam@10246 140 regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
awilliam@10246 141 | IA64_PSR_BITS_TO_SET | IA64_PSR_BN;
awilliam@10246 142 regs->cr_ipsr &= ~(IA64_PSR_BITS_TO_CLEAR
awilliam@10246 143 | IA64_PSR_RI | IA64_PSR_IS);
awilliam@10246 144 // domain runs at PL2
awilliam@10246 145 regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
awilliam@10246 146 }
awilliam@10246 147 regs->cr_ifs = 1UL << 63; /* or clear? */
awilliam@10246 148 regs->ar_fpsr = FPSR_DEFAULT;
awilliam@10246 149
awilliam@10246 150 if (VMX_DOMAIN(v)) {
awilliam@10246 151 vmx_init_all_rr(v);
awilliam@10246 152 /* Virtual processor context setup */
awilliam@10246 153 VCPU(v, vpsr) = IA64_PSR_BN;
awilliam@10246 154 VCPU(v, dcr) = 0;
awilliam@10246 155 } else {
awilliam@10246 156 init_all_rr(v);
awilliam@10246 157 regs->ar_rsc |= (2 << 2); /* force PL2/3 */
awilliam@10246 158 VCPU(v, banknum) = 1;
awilliam@10246 159 VCPU(v, metaphysical_mode) = 1;
awilliam@10246 160 VCPU(v, interrupt_mask_addr) =
awilliam@10437 161 (unsigned char *)v->domain->arch.shared_info_va +
awilliam@10437 162 INT_ENABLE_OFFSET(v);
awilliam@10246 163 VCPU(v, itv) = (1 << 16); /* timer vector masked */
awilliam@10246 164 }
awilliam@10246 165
awilliam@10246 166 v->arch.domain_itm_last = -1L;
awilliam@10246 167 }
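
/* A sketch of the macro requested by the comment above, assuming the
 * usual IA64_PSR_* single-bit masks (IA64_PSR_VM comes from the VMX
 * headers):
 */
#if 0
#define VMX_INIT_IPSR	(IA64_PSR_VM | IA64_PSR_BN | IA64_PSR_IT | \
			 IA64_PSR_RT | IA64_PSR_SI | IA64_PSR_DT | \
			 IA64_PSR_IC | IA64_PSR_I  | IA64_PSR_AC)
/* == 0x501008826008, matching the literal used above */
#endif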
awilliam@10246 168
djm@6458 169 /**************************************************************************
djm@6458 170 VCPU privileged application register access routines
djm@6458 171 **************************************************************************/
djm@6458 172
djm@7504 173 void vcpu_load_kernel_regs(VCPU *vcpu)
djm@7504 174 {
djm@7504 175 ia64_set_kr(0, VCPU(vcpu, krs[0]));
djm@7504 176 ia64_set_kr(1, VCPU(vcpu, krs[1]));
djm@7504 177 ia64_set_kr(2, VCPU(vcpu, krs[2]));
djm@7504 178 ia64_set_kr(3, VCPU(vcpu, krs[3]));
djm@7504 179 ia64_set_kr(4, VCPU(vcpu, krs[4]));
djm@7504 180 ia64_set_kr(5, VCPU(vcpu, krs[5]));
djm@7504 181 ia64_set_kr(6, VCPU(vcpu, krs[6]));
djm@7504 182 ia64_set_kr(7, VCPU(vcpu, krs[7]));
djm@7504 183 }
djm@7504 184
djm@7733 185 /* GCC 4.0.2 seems unable to suppress this call! */
djm@7733 186 #define ia64_setreg_unknown_kr() return IA64_ILLOP_FAULT
djm@7733 187
djm@6458 188 IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
djm@6458 189 {
djm@6458 190 if (reg == 44) return (vcpu_set_itc(vcpu,val));
djm@6458 191 else if (reg == 27) return (IA64_ILLOP_FAULT);
djm@6458 192 else if (reg == 24)
djm@6458 193 printf("warning: setting ar.eflg is a no-op; no IA-32 support\n");
djm@6458 194 else if (reg > 7) return (IA64_ILLOP_FAULT);
djm@6877 195 else {
djm@6877 196 PSCB(vcpu,krs[reg]) = val;
djm@6877 197 ia64_set_kr(reg,val);
djm@6877 198 }
djm@6458 199 return IA64_NO_FAULT;
djm@6458 200 }
djm@6458 201
djm@6458 202 IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
djm@6458 203 {
djm@6458 204 if (reg == 24)
djm@6458 205 printf("warning: getting ar.eflg is a no-op; no IA-32 support\n");
djm@6458 206 else if (reg > 7) return (IA64_ILLOP_FAULT);
djm@6458 207 else *val = PSCB(vcpu,krs[reg]);
djm@6458 208 return IA64_NO_FAULT;
djm@6458 209 }
djm@6458 210
djm@6458 211 /**************************************************************************
djm@6458 212 VCPU processor status register access routines
djm@6458 213 **************************************************************************/
djm@6458 214
djm@6458 215 void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
djm@6458 216 {
djm@6458 217 /* only do something if mode changes */
djm@6458 218 if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) {
awilliam@9478 219 PSCB(vcpu,metaphysical_mode) = newmode;
djm@6458 220 if (newmode) set_metaphysical_rr0();
djm@6458 221 else if (PSCB(vcpu,rrs[0]) != -1)
djm@6458 222 set_one_rr(0, PSCB(vcpu,rrs[0]));
djm@6458 223 }
djm@6458 224 }
djm@6458 225
djm@6458 226 IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu)
djm@6458 227 {
djm@6458 228 vcpu_set_metaphysical_mode(vcpu,TRUE);
djm@6458 229 return IA64_NO_FAULT;
djm@6458 230 }
djm@6458 231
djm@6458 232 IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
djm@6458 233 {
djm@6458 234 struct ia64_psr psr, imm, *ipsr;
djm@6458 235 REGS *regs = vcpu_regs(vcpu);
djm@6458 236
djm@6458 237 //PRIVOP_COUNT_ADDR(regs,_RSM);
djm@6458 238 // TODO: All of these bits need to be virtualized
djm@6458 239 // TODO: Only allowed for current vcpu
djm@6458 240 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
djm@6458 241 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
djm@6458 242 imm = *(struct ia64_psr *)&imm24;
djm@6458 243 // interrupt flag
awilliam@9479 244 if (imm.i)
awilliam@9479 245 vcpu->vcpu_info->evtchn_upcall_mask = 1;
djm@6458 246 if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 0;
djm@6458 247 // interrupt collection flag
djm@6458 248 //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
djm@6458 249 // just handle psr.up and psr.pp for now
djm@6458 250 if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
djm@6458 251 | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
djm@6458 252 | IA64_PSR_DFL | IA64_PSR_DFH))
djm@6458 253 return (IA64_ILLOP_FAULT);
djm@6458 254 if (imm.dfh) ipsr->dfh = 0;
djm@6458 255 if (imm.dfl) ipsr->dfl = 0;
djm@6869 256 if (imm.pp) {
djm@6869 257 ipsr->pp = 1;
djm@6869 258 psr.pp = 1; // priv perf ctrs always enabled
awilliam@10433 259 PSCB(vcpu,vpsr_pp) = 0; // but fool the domain if it gets psr
djm@6869 260 }
djm@6458 261 if (imm.up) { ipsr->up = 0; psr.up = 0; }
djm@6458 262 if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
djm@6458 263 if (imm.be) ipsr->be = 0;
djm@6458 264 if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
djm@6458 265 __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
djm@6458 266 return IA64_NO_FAULT;
djm@6458 267 }
djm@6458 268
djm@6458 269
djm@6458 270 IA64FAULT vcpu_set_psr_dt(VCPU *vcpu)
djm@6458 271 {
djm@6458 272 vcpu_set_metaphysical_mode(vcpu,FALSE);
djm@6458 273 return IA64_NO_FAULT;
djm@6458 274 }
djm@6458 275
djm@6458 276 IA64FAULT vcpu_set_psr_i(VCPU *vcpu)
djm@6458 277 {
awilliam@9479 278 vcpu->vcpu_info->evtchn_upcall_mask = 0;
djm@6458 279 PSCB(vcpu,interrupt_collection_enabled) = 1;
djm@6458 280 return IA64_NO_FAULT;
djm@6458 281 }
djm@6458 282
djm@6458 283 IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
djm@6458 284 {
djm@6458 285 struct ia64_psr psr, imm, *ipsr;
djm@6458 286 REGS *regs = vcpu_regs(vcpu);
djm@6458 287 UINT64 mask, enabling_interrupts = 0;
djm@6458 288
djm@6458 289 //PRIVOP_COUNT_ADDR(regs,_SSM);
djm@6458 290 // TODO: All of these bits need to be virtualized
djm@6458 291 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
djm@6458 292 imm = *(struct ia64_psr *)&imm24;
djm@6458 293 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
djm@6458 294 // just handle psr.sp,pp and psr.i,ic (and user mask) for now
djm@6458 295 mask = IA64_PSR_PP|IA64_PSR_SP|IA64_PSR_I|IA64_PSR_IC|IA64_PSR_UM |
djm@6458 296 IA64_PSR_DT|IA64_PSR_DFL|IA64_PSR_DFH;
djm@6458 297 if (imm24 & ~mask) return (IA64_ILLOP_FAULT);
djm@6458 298 if (imm.dfh) ipsr->dfh = 1;
djm@6458 299 if (imm.dfl) ipsr->dfl = 1;
djm@6869 300 if (imm.pp) {
awilliam@10433 301 ipsr->pp = 1;
awilliam@10433 302 psr.pp = 1;
awilliam@10433 303 PSCB(vcpu,vpsr_pp) = 1;
djm@6869 304 }
djm@6458 305 if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
djm@6458 306 if (imm.i) {
awilliam@9479 307 if (vcpu->vcpu_info->evtchn_upcall_mask) {
awilliam@10242 308 //printf("vcpu_set_psr_sm: psr.ic 0->1\n");
djm@6458 309 enabling_interrupts = 1;
djm@6458 310 }
awilliam@9479 311 vcpu->vcpu_info->evtchn_upcall_mask = 0;
djm@6458 312 }
djm@6458 313 if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
djm@6458 314 // TODO: do this faster
djm@6458 315 if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
djm@6458 316 if (imm.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
djm@6458 317 if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
djm@6458 318 if (imm.up) { ipsr->up = 1; psr.up = 1; }
djm@6458 319 if (imm.be) {
djm@6458 320 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
djm@6458 321 return (IA64_ILLOP_FAULT);
djm@6458 322 }
djm@6458 323 if (imm.dt) vcpu_set_metaphysical_mode(vcpu,FALSE);
djm@6458 324 __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
djm@6458 325 if (enabling_interrupts &&
djm@6458 326 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
djm@6458 327 PSCB(vcpu,pending_interruption) = 1;
djm@6458 328 return IA64_NO_FAULT;
djm@6458 329 }
djm@6458 330
djm@6458 331 IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
djm@6458 332 {
djm@6458 333 struct ia64_psr psr, newpsr, *ipsr;
djm@6458 334 REGS *regs = vcpu_regs(vcpu);
djm@6458 335 UINT64 enabling_interrupts = 0;
djm@6458 336
djm@6458 337 // TODO: All of these bits need to be virtualized
djm@6458 338 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
djm@6458 339 newpsr = *(struct ia64_psr *)&val;
djm@6458 340 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
djm@6458 341 // just handle psr.up and psr.pp for now
djm@6458 342 //if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP)) return (IA64_ILLOP_FAULT);
djm@6458 343 // however trying to set other bits can't be an error as it is in ssm
djm@6458 344 if (newpsr.dfh) ipsr->dfh = 1;
djm@6458 345 if (newpsr.dfl) ipsr->dfl = 1;
djm@6869 346 if (newpsr.pp) {
djm@6869 347 ipsr->pp = 1; psr.pp = 1;
awilliam@10433 348 PSCB(vcpu,vpsr_pp) = 1;
djm@6869 349 }
djm@6869 350 else {
djm@6869 351 ipsr->pp = 1; psr.pp = 1;
awilliam@10433 352 PSCB(vcpu,vpsr_pp) = 0;
djm@6869 353 }
djm@6458 354 if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
djm@6458 355 if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
djm@6458 356 if (newpsr.i) {
awilliam@9479 357 if (vcpu->vcpu_info->evtchn_upcall_mask)
djm@6458 358 enabling_interrupts = 1;
awilliam@9479 359 vcpu->vcpu_info->evtchn_upcall_mask = 0;
djm@6458 360 }
djm@6458 361 if (newpsr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
djm@6458 362 if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
djm@6458 363 if (newpsr.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
djm@6458 364 if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
djm@6458 365 if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
djm@6458 366 if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
djm@6458 367 else vcpu_set_metaphysical_mode(vcpu,TRUE);
djm@6458 368 if (newpsr.be) {
djm@6458 369 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
djm@6458 370 return (IA64_ILLOP_FAULT);
djm@6458 371 }
djm@6458 372 if (enabling_interrupts &&
djm@6458 373 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
djm@6458 374 PSCB(vcpu,pending_interruption) = 1;
djm@6458 375 return IA64_NO_FAULT;
djm@6458 376 }
djm@6458 377
djm@6458 378 IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval)
djm@6458 379 {
djm@7742 380 REGS *regs = vcpu_regs(vcpu);
djm@6458 381 struct ia64_psr newpsr;
djm@6458 382
djm@7742 383 newpsr = *(struct ia64_psr *)&regs->cr_ipsr;
djm@6458 384 if (newpsr.cpl == 2) newpsr.cpl = 0;
awilliam@9479 385 if (!vcpu->vcpu_info->evtchn_upcall_mask) newpsr.i = 1;
djm@6458 386 else newpsr.i = 0;
djm@6458 387 if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1;
djm@6458 388 else newpsr.ic = 0;
djm@7742 389 if (PSCB(vcpu,metaphysical_mode)) newpsr.dt = 0;
djm@7742 390 else newpsr.dt = 1;
awilliam@10433 391 if (PSCB(vcpu,vpsr_pp)) newpsr.pp = 1;
djm@6869 392 else newpsr.pp = 0;
djm@6458 393 *pval = *(unsigned long *)&newpsr;
djm@6458 394 return IA64_NO_FAULT;
djm@6458 395 }
djm@6458 396
djm@6458 397 BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
djm@6458 398 {
djm@6458 399 return !!PSCB(vcpu,interrupt_collection_enabled);
djm@6458 400 }
djm@6458 401
djm@6458 402 BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
djm@6458 403 {
awilliam@9479 404 return !vcpu->vcpu_info->evtchn_upcall_mask;
djm@6458 405 }
djm@6458 406
djm@6458 407 UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
djm@6458 408 {
djm@6458 409 UINT64 dcr = PSCBX(vcpu,dcr);
awilliam@9005 410 PSR psr;
djm@6458 411
awilliam@10242 412 //printf("*** vcpu_get_ipsr_int_state (0x%016lx)...\n",prevpsr);
djm@6458 413 psr.i64 = prevpsr;
djm@6458 414 psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
djm@6458 415 psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
djm@6458 416 psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled);
awilliam@9479 417 psr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;
djm@6458 418 psr.ia64_psr.bn = PSCB(vcpu,banknum);
djm@6458 419 psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
djm@6458 420 if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
djm@6458 421 // psr.pk = 1;
awilliam@10242 422 //printf("returns 0x%016lx...\n",psr.i64);
djm@6458 423 return psr.i64;
djm@6458 424 }
djm@6458 425
djm@6458 426 /**************************************************************************
djm@6458 427 VCPU control register access routines
djm@6458 428 **************************************************************************/
djm@6458 429
djm@6458 430 IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
djm@6458 431 {
djm@6458 432 //verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
djm@6458 433 // Reads of cr.dcr on Xen always have the sign bit set, so
djm@6458 434 // a domain can differentiate whether it is running on SP or not
djm@6458 435 *pval = PSCBX(vcpu,dcr) | 0x8000000000000000L;
djm@6458 436 return (IA64_NO_FAULT);
djm@6458 437 }
djm@6458 438
djm@6458 439 IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
djm@6458 440 {
djm@6801 441 if(VMX_DOMAIN(vcpu)){
djm@6801 442 *pval = PSCB(vcpu,iva) & ~0x7fffL;
djm@6801 443 }else{
djm@6801 444 *pval = PSCBX(vcpu,iva) & ~0x7fffL;
djm@6801 445 }
djm@6458 446 return (IA64_NO_FAULT);
djm@6458 447 }
djm@6458 448
djm@6458 449 IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
djm@6458 450 {
djm@6458 451 *pval = PSCB(vcpu,pta);
djm@6458 452 return (IA64_NO_FAULT);
djm@6458 453 }
djm@6458 454
djm@6458 455 IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
djm@6458 456 {
djm@6458 457 //REGS *regs = vcpu_regs(vcpu);
djm@6458 458 //*pval = regs->cr_ipsr;
djm@6458 459 *pval = PSCB(vcpu,ipsr);
djm@6458 460 return (IA64_NO_FAULT);
djm@6458 461 }
djm@6458 462
djm@6458 463 IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
djm@6458 464 {
djm@6458 465 *pval = PSCB(vcpu,isr);
djm@6458 466 return (IA64_NO_FAULT);
djm@6458 467 }
djm@6458 468
djm@6458 469 IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
djm@6458 470 {
djm@6458 471 //REGS *regs = vcpu_regs(vcpu);
djm@6458 472 //*pval = regs->cr_iip;
djm@6458 473 *pval = PSCB(vcpu,iip);
djm@6458 474 return (IA64_NO_FAULT);
djm@6458 475 }
djm@6458 476
djm@6458 477 IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
djm@6458 478 {
awilliam@10561 479 PRIVOP_COUNT_ADDR(vcpu_regs(vcpu),_GET_IFA);
awilliam@10561 480 *pval = PSCB(vcpu,ifa);
djm@6458 481 return (IA64_NO_FAULT);
djm@6458 482 }
djm@6458 483
djm@6458 484 unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
djm@6458 485 {
djm@6458 486 ia64_rr rr;
djm@6458 487
djm@6458 488 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
djm@6458 489 return(rr.ps);
djm@6458 490 }
djm@6458 491
djm@6458 492 unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
djm@6458 493 {
djm@6458 494 ia64_rr rr;
djm@6458 495
djm@6458 496 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
djm@6458 497 return(rr.rid);
djm@6458 498 }
djm@6458 499
djm@6458 500 unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
djm@6458 501 {
djm@6458 502 ia64_rr rr;
djm@6458 503
djm@6458 504 rr.rrval = 0;
djm@6458 505 rr.ps = vcpu_get_rr_ps(vcpu,ifa);
djm@6458 506 rr.rid = vcpu_get_rr_rid(vcpu,ifa);
djm@6458 507 return (rr.rrval);
djm@6458 508 }
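
/* Worked example, assuming the usual region-register layout (ve at
 * bit 0, ps in bits 2..7, rid in bits 8..31): a fault in a region
 * with rid = 0x1234 and 16KB pages (ps = 14) yields
 * itir = (0x1234 << 8) | (14 << 2) = 0x123438.
 */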
djm@6458 509
djm@6458 510
djm@6458 511 IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
djm@6458 512 {
djm@6458 513 UINT64 val = PSCB(vcpu,itir);
djm@6458 514 *pval = val;
djm@6458 515 return (IA64_NO_FAULT);
djm@6458 516 }
djm@6458 517
djm@6458 518 IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
djm@6458 519 {
djm@6458 520 UINT64 val = PSCB(vcpu,iipa);
djm@6458 521 // SP entry code does not save iipa yet nor does it get
djm@6458 522 // properly delivered in the pscb
djm@6801 523 // printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
djm@6458 524 *pval = val;
djm@6458 525 return (IA64_NO_FAULT);
djm@6458 526 }
djm@6458 527
djm@6458 528 IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
djm@6458 529 {
djm@6458 530 //PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
djm@6458 531 //*pval = PSCB(vcpu,regs).cr_ifs;
djm@6458 532 *pval = PSCB(vcpu,ifs);
djm@6458 533 PSCB(vcpu,incomplete_regframe) = 0;
djm@6458 534 return (IA64_NO_FAULT);
djm@6458 535 }
djm@6458 536
djm@6458 537 IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
djm@6458 538 {
djm@6458 539 UINT64 val = PSCB(vcpu,iim);
djm@6458 540 *pval = val;
djm@6458 541 return (IA64_NO_FAULT);
djm@6458 542 }
djm@6458 543
djm@6458 544 IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
djm@6458 545 {
awilliam@10561 546 PRIVOP_COUNT_ADDR(vcpu_regs(vcpu),_THASH);
awilliam@10561 547 *pval = PSCB(vcpu,iha);
djm@6458 548 return (IA64_NO_FAULT);
djm@6458 549 }
djm@6458 550
djm@6458 551 IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
djm@6458 552 {
djm@6458 553 // Reads of cr.dcr on SP always have the sign bit set, so
djm@6458 554 // a domain can differentiate whether it is running on SP or not
djm@6458 555 // Thus, writes of DCR should ignore the sign bit
djm@6458 556 //verbose("vcpu_set_dcr: called\n");
djm@6458 557 PSCBX(vcpu,dcr) = val & ~0x8000000000000000L;
djm@6458 558 return (IA64_NO_FAULT);
djm@6458 559 }
djm@6458 560
djm@6458 561 IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
djm@6458 562 {
djm@6801 563 if(VMX_DOMAIN(vcpu)){
djm@6801 564 PSCB(vcpu,iva) = val & ~0x7fffL;
djm@6801 565 }else{
djm@6801 566 PSCBX(vcpu,iva) = val & ~0x7fffL;
djm@6801 567 }
djm@6458 568 return (IA64_NO_FAULT);
djm@6458 569 }
djm@6458 570
djm@6458 571 IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val)
djm@6458 572 {
djm@6458 573 if (val & IA64_PTA_LFMT) {
djm@6458 574 printf("*** No support for VHPT long format yet!!\n");
djm@6458 575 return (IA64_ILLOP_FAULT);
djm@6458 576 }
djm@6458 577 if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
djm@6458 578 if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
djm@6458 579 PSCB(vcpu,pta) = val;
djm@6458 580 return IA64_NO_FAULT;
djm@6458 581 }
djm@6458 582
djm@6458 583 IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
djm@6458 584 {
djm@6458 585 PSCB(vcpu,ipsr) = val;
djm@6458 586 return IA64_NO_FAULT;
djm@6458 587 }
djm@6458 588
djm@6458 589 IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
djm@6458 590 {
djm@6458 591 PSCB(vcpu,isr) = val;
djm@6458 592 return IA64_NO_FAULT;
djm@6458 593 }
djm@6458 594
djm@6458 595 IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
djm@6458 596 {
djm@6458 597 PSCB(vcpu,iip) = val;
djm@6458 598 return IA64_NO_FAULT;
djm@6458 599 }
djm@6458 600
djm@6458 601 IA64FAULT vcpu_increment_iip(VCPU *vcpu)
djm@6458 602 {
djm@6458 603 REGS *regs = vcpu_regs(vcpu);
djm@6458 604 struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
djm@6458 605 if (ipsr->ri == 2) { ipsr->ri=0; regs->cr_iip += 16; }
djm@6458 606 else ipsr->ri++;
djm@6458 607 return (IA64_NO_FAULT);
djm@6458 608 }
djm@6458 609
djm@6458 610 IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
djm@6458 611 {
djm@6458 612 PSCB(vcpu,ifa) = val;
djm@6458 613 return IA64_NO_FAULT;
djm@6458 614 }
djm@6458 615
djm@6458 616 IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
djm@6458 617 {
djm@6458 618 PSCB(vcpu,itir) = val;
djm@6458 619 return IA64_NO_FAULT;
djm@6458 620 }
djm@6458 621
djm@6458 622 IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val)
djm@6458 623 {
djm@6458 624 // SP entry code does not save iipa yet nor does it get
djm@6458 625 // properly delivered in the pscb
djm@6801 626 // printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
djm@6458 627 PSCB(vcpu,iipa) = val;
djm@6458 628 return IA64_NO_FAULT;
djm@6458 629 }
djm@6458 630
djm@6458 631 IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
djm@6458 632 {
djm@6458 633 //REGS *regs = vcpu_regs(vcpu);
djm@6458 634 PSCB(vcpu,ifs) = val;
djm@6458 635 return IA64_NO_FAULT;
djm@6458 636 }
djm@6458 637
djm@6458 638 IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
djm@6458 639 {
djm@6458 640 PSCB(vcpu,iim) = val;
djm@6458 641 return IA64_NO_FAULT;
djm@6458 642 }
djm@6458 643
djm@6458 644 IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
djm@6458 645 {
djm@6458 646 PSCB(vcpu,iha) = val;
djm@6458 647 return IA64_NO_FAULT;
djm@6458 648 }
djm@6458 649
djm@6458 650 /**************************************************************************
djm@6458 651 VCPU interrupt control register access routines
djm@6458 652 **************************************************************************/
djm@6458 653
djm@6458 654 void vcpu_pend_unspecified_interrupt(VCPU *vcpu)
djm@6458 655 {
djm@6458 656 PSCB(vcpu,pending_interruption) = 1;
djm@6458 657 }
djm@6458 658
djm@6458 659 void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
djm@6458 660 {
djm@6458 661 if (vector & ~0xff) {
djm@6458 662 printf("vcpu_pend_interrupt: bad vector\n");
djm@6458 663 return;
djm@6458 664 }
awilliam@10141 665
awilliam@10141 666 if (vcpu->arch.event_callback_ip) {
awilliam@10141 667 printf("Deprecated interface. Move to new event based solution\n");
awilliam@10141 668 return;
djm@6458 669 }
awilliam@10141 670
awilliam@10141 671 if ( VMX_DOMAIN(vcpu) ) {
awilliam@10141 672 set_bit(vector,VCPU(vcpu,irr));
awilliam@10141 673 } else {
awilliam@10141 674 set_bit(vector,PSCBX(vcpu,irr));
awilliam@10141 675 PSCB(vcpu,pending_interruption) = 1;
awilliam@10141 676 }
djm@6458 677 }
djm@6458 678
djm@6458 679 #define IA64_TPR_MMI 0x10000
djm@6458 680 #define IA64_TPR_MIC 0x000f0
djm@6458 681
djm@6458 682 /* checks to see if a VCPU has any unmasked pending interrupts
djm@6458 683 * if so, returns the highest, else returns SPURIOUS_VECTOR */
djm@6458 684 /* NOTE: Since this gets called from vcpu_get_ivr() and the
djm@6458 685 * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
djm@6458 686 * this routine also ignores pscb.interrupt_delivery_enabled
djm@6458 687 * and this must be checked independently; see vcpu_deliverable_interrupts() */
djm@6458 688 UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
djm@6458 689 {
awilliam@9169 690 UINT64 *p, *r, bits, bitnum, mask, i, vector;
djm@6458 691
awilliam@10141 692 if (vcpu->arch.event_callback_ip)
awilliam@10141 693 return SPURIOUS_VECTOR;
awilliam@10141 694
djm@6868 695 /* Always check for a pending event, since the guest may just ack
djm@6868 696 * the event injection without handling it, and may later discard
djm@6868 697 * the event itself.
djm@6868 698 */
djm@8368 699 check_start:
djm@6868 700 if (event_pending(vcpu) &&
awilliam@10008 701 !test_bit(vcpu->domain->shared_info->arch.evtchn_vector,
djm@6868 702 &PSCBX(vcpu, insvc[0])))
awilliam@10008 703 vcpu_pend_interrupt(vcpu, vcpu->domain->shared_info->arch.evtchn_vector);
djm@6868 704
djm@6458 705 p = &PSCBX(vcpu,irr[3]);
djm@6458 706 r = &PSCBX(vcpu,insvc[3]);
awilliam@9169 707 for (i = 3; ; p--, r--, i--) {
awilliam@9169 708 bits = *p ;
djm@6458 709 if (bits) break; // got a potential interrupt
djm@6458 710 if (*r) {
djm@6458 711 // nothing pending in this word, but an in-service
djm@6458 712 // interrupt here masks all lower-priority vectors
djm@6458 713 return SPURIOUS_VECTOR;
djm@6458 714 }
djm@6458 715 if (i == 0) {
djm@6458 716 // checked all bits... nothing pending+inservice
djm@6458 717 return SPURIOUS_VECTOR;
djm@6458 718 }
djm@6458 719 }
djm@6458 720 // have a pending,deliverable interrupt... see if it is masked
djm@6458 721 bitnum = ia64_fls(bits);
awilliam@10242 722 //printf("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...\n",bitnum);
djm@6458 723 vector = bitnum+(i*64);
djm@6458 724 mask = 1L << bitnum;
djm@8368 725 /* sanity check for guest timer interrupt */
djm@8368 726 if (vector == (PSCB(vcpu,itv) & 0xff)) {
djm@8368 727 uint64_t now = ia64_get_itc();
djm@8368 728 if (now < PSCBX(vcpu,domain_itm)) {
awilliam@9171 729 // printk("Ooops, pending guest timer before its due\n");
djm@8368 730 PSCBX(vcpu,irr[i]) &= ~mask;
djm@8368 731 goto check_start;
djm@8368 732 }
djm@8368 733 }
awilliam@10242 734 //printf("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...\n",vector);
djm@6458 735 if (*r >= mask) {
djm@6458 736 // masked by equal inservice
djm@6458 737 //printf("but masked by equal inservice\n");
djm@6458 738 return SPURIOUS_VECTOR;
djm@6458 739 }
djm@6458 740 if (PSCB(vcpu,tpr) & IA64_TPR_MMI) {
djm@6458 741 // tpr.mmi is set
djm@6458 742 //printf("but masked by tpr.mmi\n");
djm@6458 743 return SPURIOUS_VECTOR;
djm@6458 744 }
djm@6458 745 if (((PSCB(vcpu,tpr) & IA64_TPR_MIC) + 15) >= vector) {
djm@6458 746 //tpr.mic masks class
djm@6458 747 //printf("but masked by tpr.mic\n");
djm@6458 748 return SPURIOUS_VECTOR;
djm@6458 749 }
djm@6458 750
djm@6458 751 //printf("returned to caller\n");
djm@6458 752 return vector;
djm@6458 753 }
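
/* A worked example of the masking rules above (assuming ia64_fls
 * returns the index of the most significant set bit): vector 0x41
 * pending in irr[1] gives i = 1, bitnum = 1, vector = 64 + 1 = 0x41.
 * It is reported as spurious if insvc[1] has any bit at or above
 * bit 1 set (*r >= mask), if tpr.mmi (bit 16) is set, or if
 * vector <= (tpr & IA64_TPR_MIC) + 15, i.e. its class is masked by
 * tpr.mic.
 */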
djm@6458 754
djm@6458 755 UINT64 vcpu_deliverable_interrupts(VCPU *vcpu)
djm@6458 756 {
djm@6458 757 return (vcpu_get_psr_i(vcpu) &&
djm@6458 758 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
djm@6458 759 }
djm@6458 760
djm@6458 761 UINT64 vcpu_deliverable_timer(VCPU *vcpu)
djm@6458 762 {
djm@6458 763 return (vcpu_get_psr_i(vcpu) &&
djm@6458 764 vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu,itv));
djm@6458 765 }
djm@6458 766
djm@6458 767 IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
djm@6458 768 {
awilliam@9267 769 /* Use real LID for domain0 until vIOSAPIC is present.
awilliam@9267 770 Use EID=0, ID=vcpu_id for domU. */
awilliam@9267 771 if (vcpu->domain == dom0)
awilliam@9267 772 *pval = ia64_getreg(_IA64_REG_CR_LID);
awilliam@9267 773 else
awilliam@9267 774 *pval = vcpu->vcpu_id << 24;
djm@6458 775 return IA64_NO_FAULT;
djm@6458 776 }
djm@6458 777
djm@6458 778 IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
djm@6458 779 {
djm@6458 780 int i;
djm@6458 781 UINT64 vector, mask;
djm@6458 782
djm@6458 783 #define HEARTBEAT_FREQ 16 // period in seconds
djm@6458 784 #ifdef HEARTBEAT_FREQ
djm@6458 785 #define N_DOMS 16 // number of domains tracked
awilliam@9005 786 #if 0
djm@6458 787 static long count[N_DOMS] = { 0 };
awilliam@9005 788 #endif
djm@6458 789 static long nonclockcount[N_DOMS] = { 0 };
djm@6458 790 unsigned domid = vcpu->domain->domain_id;
djm@6458 791 #endif
djm@6458 792 #ifdef IRQ_DEBUG
djm@6458 793 static char firstivr = 1;
djm@6458 794 static char firsttime[256];
djm@6458 795 if (firstivr) {
djm@6458 796 int i;
djm@6458 797 for (i=0;i<256;i++) firsttime[i]=1;
djm@6458 798 firstivr=0;
djm@6458 799 }
djm@6458 800 #endif
djm@6458 801
djm@6458 802 vector = vcpu_check_pending_interrupts(vcpu);
djm@6458 803 if (vector == SPURIOUS_VECTOR) {
djm@6458 804 PSCB(vcpu,pending_interruption) = 0;
djm@6458 805 *pval = vector;
djm@6458 806 return IA64_NO_FAULT;
djm@6458 807 }
djm@6458 808 #ifdef HEARTBEAT_FREQ
djm@6458 809 if (domid >= N_DOMS) domid = N_DOMS-1;
djm@7333 810 #if 0
djm@6458 811 if (vector == (PSCB(vcpu,itv) & 0xff)) {
djm@6458 812 if (!(++count[domid] & ((HEARTBEAT_FREQ*1024)-1))) {
djm@6458 813 printf("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
djm@6458 814 domid, count[domid], nonclockcount[domid]);
djm@6458 815 //count[domid] = 0;
djm@6458 816 //dump_runq();
djm@6458 817 }
djm@6458 818 }
djm@7333 819 #endif
djm@6458 820 else nonclockcount[domid]++;
djm@6458 821 #endif
djm@6458 822 // now have an unmasked, pending, deliverable vector!
djm@6458 823 // getting ivr has "side effects"
djm@6458 824 #ifdef IRQ_DEBUG
djm@6458 825 if (firsttime[vector]) {
awilliam@9005 826 printf("*** First get_ivr on vector=%lu,itc=%lx\n",
djm@6458 827 vector,ia64_get_itc());
djm@6458 828 firsttime[vector]=0;
djm@6458 829 }
djm@6458 830 #endif
djm@8368 831 /* if delivering a timer interrupt, remember domain_itm, which
djm@8368 832 * needs to be done before clearing irr
djm@8368 833 */
djm@8368 834 if (vector == (PSCB(vcpu,itv) & 0xff)) {
djm@8368 835 PSCBX(vcpu,domain_itm_last) = PSCBX(vcpu,domain_itm);
djm@8368 836 }
djm@8368 837
djm@6458 838 i = vector >> 6;
djm@6458 839 mask = 1L << (vector & 0x3f);
awilliam@9005 840 //printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %lu\n",vector);
djm@6458 841 PSCBX(vcpu,insvc[i]) |= mask;
djm@6458 842 PSCBX(vcpu,irr[i]) &= ~mask;
djm@6458 843 //PSCB(vcpu,pending_interruption)--;
djm@6458 844 *pval = vector;
djm@6458 845 return IA64_NO_FAULT;
djm@6458 846 }
djm@6458 847
djm@6458 848 IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
djm@6458 849 {
djm@6458 850 *pval = PSCB(vcpu,tpr);
djm@6458 851 return (IA64_NO_FAULT);
djm@6458 852 }
djm@6458 853
djm@6458 854 IA64FAULT vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
djm@6458 855 {
djm@6458 856 *pval = 0L; // reads of eoi always return 0
djm@6458 857 return (IA64_NO_FAULT);
djm@6458 858 }
djm@6458 859
djm@6458 860 IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
djm@6458 861 {
awilliam@9488 862 *pval = PSCBX(vcpu, irr[0]);
djm@6458 863 return (IA64_NO_FAULT);
djm@6458 864 }
djm@6458 865
djm@6458 866 IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
djm@6458 867 {
awilliam@9488 868 *pval = PSCBX(vcpu, irr[1]);
djm@6458 869 return (IA64_NO_FAULT);
djm@6458 870 }
djm@6458 871
djm@6458 872 IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
djm@6458 873 {
awilliam@9488 874 *pval = PSCBX(vcpu, irr[2]);
djm@6458 875 return (IA64_NO_FAULT);
djm@6458 876 }
djm@6458 877
djm@6458 878 IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
djm@6458 879 {
awilliam@9488 880 *pval = PSCBX(vcpu, irr[3]);
djm@6458 881 return (IA64_NO_FAULT);
djm@6458 882 }
djm@6458 883
djm@6458 884 IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
djm@6458 885 {
djm@6458 886 *pval = PSCB(vcpu,itv);
djm@6458 887 return (IA64_NO_FAULT);
djm@6458 888 }
djm@6458 889
djm@6458 890 IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
djm@6458 891 {
djm@6458 892 *pval = PSCB(vcpu,pmv);
djm@6458 893 return (IA64_NO_FAULT);
djm@6458 894 }
djm@6458 895
djm@6458 896 IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
djm@6458 897 {
djm@6458 898 *pval = PSCB(vcpu,cmcv);
djm@6458 899 return (IA64_NO_FAULT);
djm@6458 900 }
djm@6458 901
djm@6458 902 IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
djm@6458 903 {
djm@6458 904 // fix this when setting values other than m-bit is supported
djm@6458 905 printf("vcpu_get_lrr0: Unmasked interrupts unsupported\n");
djm@6458 906 *pval = (1L << 16);
djm@6458 907 return (IA64_NO_FAULT);
djm@6458 908 }
djm@6458 909
djm@6458 910 IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
djm@6458 911 {
djm@6458 912 // fix this when setting values other than m-bit is supported
djm@6458 913 printf("vcpu_get_lrr1: Unmasked interrupts unsupported\n");
djm@6458 914 *pval = (1L << 16);
djm@6458 915 return (IA64_NO_FAULT);
djm@6458 916 }
djm@6458 917
djm@6458 918 IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val)
djm@6458 919 {
djm@6458 920 printf("vcpu_set_lid: Setting cr.lid is unsupported\n");
djm@6458 921 return (IA64_ILLOP_FAULT);
djm@6458 922 }
djm@6458 923
djm@6458 924 IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
djm@6458 925 {
djm@6458 926 if (val & 0xff00) return IA64_RSVDREG_FAULT;
djm@6458 927 PSCB(vcpu,tpr) = val;
awilliam@9158 928 /* This can unmask interrupts. */
djm@6458 929 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
djm@6458 930 PSCB(vcpu,pending_interruption) = 1;
djm@6458 931 return (IA64_NO_FAULT);
djm@6458 932 }
djm@6458 933
djm@6458 934 IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val)
djm@6458 935 {
djm@6458 936 UINT64 *p, bits, vec, bitnum;
djm@6458 937 int i;
djm@6458 938
djm@6458 939 p = &PSCBX(vcpu,insvc[3]);
djm@6458 940 for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
djm@6458 941 if (i < 0) {
awilliam@9083 942 printf("Trying to EOI interrupt when none are in-service.\n");
awilliam@9083 943 return IA64_NO_FAULT;
djm@6458 944 }
djm@6458 945 bitnum = ia64_fls(bits);
djm@6458 946 vec = bitnum + (i*64);
djm@6458 947 /* clear the correct bit */
djm@6458 948 bits &= ~(1L << bitnum);
djm@6458 949 *p = bits;
djm@6458 950 /* clearing an eoi bit may unmask another pending interrupt... */
awilliam@9479 951 if (!vcpu->vcpu_info->evtchn_upcall_mask) { // but only if enabled...
djm@6458 952 // worry about this later... Linux only calls eoi
djm@6458 953 // with interrupts disabled
awilliam@9083 954 printf("Trying to EOI interrupt with interrupts enabled\n");
djm@6458 955 }
djm@6458 956 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
djm@6458 957 PSCB(vcpu,pending_interruption) = 1;
djm@6458 958 //printf("YYYYY vcpu_set_eoi: Successful\n");
djm@6458 959 return (IA64_NO_FAULT);
djm@6458 960 }
djm@6458 961
djm@6458 962 IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val)
djm@6458 963 {
djm@6458 964 if (!(val & (1L << 16))) {
djm@6458 965 printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
djm@6458 966 return (IA64_ILLOP_FAULT);
djm@6458 967 }
djm@6458 968 // no place to save this state but nothing to do anyway
djm@6458 969 return (IA64_NO_FAULT);
djm@6458 970 }
djm@6458 971
djm@6458 972 IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val)
djm@6458 973 {
djm@6458 974 if (!(val & (1L << 16))) {
djm@6458 975 printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
djm@6458 976 return (IA64_ILLOP_FAULT);
djm@6458 977 }
djm@6458 978 // no place to save this state but nothing to do anyway
djm@6458 979 return (IA64_NO_FAULT);
djm@6458 980 }
djm@6458 981
djm@6458 982 IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
djm@6458 983 {
awilliam@9769 984 /* Check reserved fields. */
awilliam@9769 985 if (val & 0xef00)
awilliam@9769 986 return (IA64_ILLOP_FAULT);
djm@6458 987 PSCB(vcpu,itv) = val;
djm@6458 988 if (val & 0x10000) {
awilliam@9769 989 /* Disable itm. */
djm@6458 990 PSCBX(vcpu,domain_itm) = 0;
djm@6458 991 }
awilliam@9008 992 else vcpu_set_next_timer(vcpu);
djm@6458 993 return (IA64_NO_FAULT);
djm@6458 994 }
djm@6458 995
djm@6458 996 IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
djm@6458 997 {
djm@6458 998 if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
djm@6458 999 PSCB(vcpu,pmv) = val;
djm@6458 1000 return (IA64_NO_FAULT);
djm@6458 1001 }
djm@6458 1002
djm@6458 1003 IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
djm@6458 1004 {
djm@6458 1005 if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
djm@6458 1006 PSCB(vcpu,cmcv) = val;
djm@6458 1007 return (IA64_NO_FAULT);
djm@6458 1008 }
djm@6458 1009
djm@6458 1010 /**************************************************************************
djm@6458 1011 VCPU temporary register access routines
djm@6458 1012 **************************************************************************/
djm@6458 1013 UINT64 vcpu_get_tmp(VCPU *vcpu, UINT64 index)
djm@6458 1014 {
djm@6458 1015 if (index > 7) return 0;
djm@6458 1016 return PSCB(vcpu,tmp[index]);
djm@6458 1017 }
djm@6458 1018
djm@6458 1019 void vcpu_set_tmp(VCPU *vcpu, UINT64 index, UINT64 val)
djm@6458 1020 {
djm@6458 1021 if (index <= 7) PSCB(vcpu,tmp[index]) = val;
djm@6458 1022 }
djm@6458 1023
djm@6458 1024 /**************************************************************************
djm@6458 1025 Interval timer routines
djm@6458 1026 **************************************************************************/
djm@6458 1027
djm@6458 1028 BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
djm@6458 1029 {
djm@6458 1030 UINT64 itv = PSCB(vcpu,itv);
djm@6458 1031 return(!itv || !!(itv & 0x10000));
djm@6458 1032 }
djm@6458 1033
djm@6458 1034 BOOLEAN vcpu_timer_inservice(VCPU *vcpu)
djm@6458 1035 {
djm@6458 1036 UINT64 itv = PSCB(vcpu,itv);
djm@6458 1037 return (test_bit(itv, PSCBX(vcpu,insvc)));
djm@6458 1038 }
djm@6458 1039
djm@6458 1040 BOOLEAN vcpu_timer_expired(VCPU *vcpu)
djm@6458 1041 {
djm@6458 1042 unsigned long domain_itm = PSCBX(vcpu,domain_itm);
djm@6458 1043 unsigned long now = ia64_get_itc();
djm@6458 1044
djm@6458 1045 if (!domain_itm) return FALSE;
djm@6458 1046 if (now < domain_itm) return FALSE;
djm@6458 1047 if (vcpu_timer_disabled(vcpu)) return FALSE;
djm@6458 1048 return TRUE;
djm@6458 1049 }
djm@6458 1050
djm@6458 1051 void vcpu_safe_set_itm(unsigned long val)
djm@6458 1052 {
djm@6458 1053 unsigned long epsilon = 100;
djm@7143 1054 unsigned long flags;
djm@6458 1055 UINT64 now = ia64_get_itc();
djm@6458 1056
djm@7143 1057 local_irq_save(flags);
djm@6458 1058 while (1) {
djm@6458 1059 //printf("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
djm@6458 1060 ia64_set_itm(val);
djm@6458 1061 if (val > (now = ia64_get_itc())) break;
djm@6458 1062 val = now + epsilon;
djm@6458 1063 epsilon <<= 1;
djm@6458 1064 }
djm@7143 1065 local_irq_restore(flags);
djm@6458 1066 }
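
/* The retry loop above guards against racing the free-running ITC: a
 * match value written below the current count would never fire. The
 * same pattern in isolation (a sketch with hypothetical read_counter/
 * write_match primitives):
 *
 *	uint64_t eps = 100;
 *	for (;;) {
 *		write_match(val);
 *		if (val > read_counter())
 *			break;		// deadline still ahead: armed
 *		val = read_counter() + eps;
 *		eps <<= 1;		// push further out each retry
 *	}
 */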
djm@6458 1067
djm@6458 1068 void vcpu_set_next_timer(VCPU *vcpu)
djm@6458 1069 {
djm@6458 1070 UINT64 d = PSCBX(vcpu,domain_itm);
djm@6458 1071 //UINT64 s = PSCBX(vcpu,xen_itm);
djm@6458 1072 UINT64 s = local_cpu_data->itm_next;
djm@6458 1073 UINT64 now = ia64_get_itc();
djm@6458 1074
djm@6458 1075 /* gloss over the wraparound problem for now... we know it exists
djm@6458 1076 * but it doesn't matter right now */
djm@6458 1077
kaf24@8507 1078 if (is_idle_domain(vcpu->domain)) {
djm@6469 1079 // printf("****** vcpu_set_next_timer called during idle!!\n");
djm@6466 1080 vcpu_safe_set_itm(s);
djm@6466 1081 return;
djm@6458 1082 }
djm@6458 1083 //s = PSCBX(vcpu,xen_itm);
djm@6458 1084 if (d && (d > now) && (d < s)) {
djm@6458 1085 vcpu_safe_set_itm(d);
djm@6458 1086 //using_domain_as_itm++;
djm@6458 1087 }
djm@6458 1088 else {
djm@6458 1089 vcpu_safe_set_itm(s);
djm@6458 1090 //using_xen_as_itm++;
djm@6458 1091 }
djm@6458 1092 }
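
/* The policy above in one line: use the guest's itm (d) only when it
 * is still in the future and earlier than Xen's next tick (s):
 *
 *	itm = (d && d > now && d < s) ? d : s;
 */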
djm@6458 1093
djm@6458 1094 IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
djm@6458 1095 {
awilliam@9005 1096 //UINT now = ia64_get_itc();
djm@6458 1097
djm@6458 1098 //if (val < now) val = now + 1000;
djm@6458 1099 //printf("*** vcpu_set_itm: called with %lx\n",val);
djm@6458 1100 PSCBX(vcpu,domain_itm) = val;
djm@6458 1101 vcpu_set_next_timer(vcpu);
djm@6458 1102 return (IA64_NO_FAULT);
djm@6458 1103 }
djm@6458 1104
djm@6458 1105 IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
djm@6458 1106 {
awilliam@9005 1107 #define DISALLOW_SETTING_ITC_FOR_NOW
awilliam@9005 1108 #ifdef DISALLOW_SETTING_ITC_FOR_NOW
awilliam@9769 1109 static int did_print;
awilliam@9769 1110 if (!did_print) {
awilliam@9769 1111 printf("vcpu_set_itc: Setting ar.itc is currently disabled\n");
awilliam@9769 1112 printf("(this message is only displayed one)\n");
awilliam@9769 1113 did_print = 1;
awilliam@9769 1114 }
awilliam@9005 1115 #else
djm@6458 1116 UINT64 oldnow = ia64_get_itc();
djm@6458 1117 UINT64 olditm = PSCBX(vcpu,domain_itm);
djm@6458 1118 unsigned long d = olditm - oldnow;
djm@6458 1119 unsigned long x = local_cpu_data->itm_next - oldnow;
djm@6458 1120
djm@6458 1121 UINT64 newnow = val, min_delta;
djm@6458 1122
djm@6458 1123 local_irq_disable();
djm@6458 1124 if (olditm) {
djm@6458 1125 printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
djm@6458 1126 PSCBX(vcpu,domain_itm) = newnow + d;
djm@6458 1127 }
djm@6458 1128 local_cpu_data->itm_next = newnow + x;
djm@6458 1129 d = PSCBX(vcpu,domain_itm);
djm@6458 1130 x = local_cpu_data->itm_next;
djm@6458 1131
djm@6458 1132 ia64_set_itc(newnow);
djm@6458 1133 if (d && (d > newnow) && (d < x)) {
djm@6458 1134 vcpu_safe_set_itm(d);
djm@6458 1135 //using_domain_as_itm++;
djm@6458 1136 }
djm@6458 1137 else {
djm@6458 1138 vcpu_safe_set_itm(x);
djm@6458 1139 //using_xen_as_itm++;
djm@6458 1140 }
djm@6458 1141 local_irq_enable();
djm@6458 1142 #endif
djm@6458 1143 return (IA64_NO_FAULT);
djm@6458 1144 }
djm@6458 1145
djm@6458 1146 IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
djm@6458 1147 {
djm@6458 1148 //FIXME: Implement this
djm@6458 1149 printf("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
djm@6458 1150 return (IA64_NO_FAULT);
djm@6458 1151 //return (IA64_ILLOP_FAULT);
djm@6458 1152 }
djm@6458 1153
djm@6458 1154 IA64FAULT vcpu_get_itc(VCPU *vcpu, UINT64 *pval)
djm@6458 1155 {
djm@6458 1156 //TODO: Implement this
djm@6458 1157 printf("vcpu_get_itc: Getting ar.itc is unsupported\n");
djm@6458 1158 return (IA64_ILLOP_FAULT);
djm@6458 1159 }
djm@6458 1160
djm@6458 1161 void vcpu_pend_timer(VCPU *vcpu)
djm@6458 1162 {
djm@6458 1163 UINT64 itv = PSCB(vcpu,itv) & 0xff;
djm@6458 1164
djm@6458 1165 if (vcpu_timer_disabled(vcpu)) return;
djm@6458 1166 //if (vcpu_timer_inservice(vcpu)) return;
djm@6458 1167 if (PSCBX(vcpu,domain_itm_last) == PSCBX(vcpu,domain_itm)) {
djm@6458 1168 // already delivered an interrupt for this so
djm@6458 1169 // don't deliver another
djm@6458 1170 return;
djm@6458 1171 }
awilliam@10141 1172 if (vcpu->arch.event_callback_ip) {
awilliam@10141 1173 /* A small window may occur when injecting a vIRQ before the
awilliam@10141 1174 * related handler has been registered. Don't fire in that case.
awilliam@10141 1175 */
awilliam@10141 1176 if (vcpu->virq_to_evtchn[VIRQ_ITC]) {
awilliam@10141 1177 send_guest_vcpu_virq(vcpu, VIRQ_ITC);
awilliam@10141 1178 PSCBX(vcpu, domain_itm_last) = PSCBX(vcpu, domain_itm);
awilliam@10141 1179 }
awilliam@10141 1180 } else
awilliam@10141 1181 vcpu_pend_interrupt(vcpu, itv);
djm@6458 1182 }
djm@6458 1183
djm@6458 1184 // returns true if ready to deliver a timer interrupt too early
djm@6458 1185 UINT64 vcpu_timer_pending_early(VCPU *vcpu)
djm@6458 1186 {
djm@6458 1187 UINT64 now = ia64_get_itc();
djm@6458 1188 UINT64 itm = PSCBX(vcpu,domain_itm);
djm@6458 1189
djm@6458 1190 if (vcpu_timer_disabled(vcpu)) return 0;
djm@6458 1191 if (!itm) return 0;
djm@6458 1192 return (vcpu_deliverable_timer(vcpu) && (now < itm));
djm@6458 1193 }
djm@6458 1194
djm@6458 1195 /**************************************************************************
djm@6458 1196 Privileged operation emulation routines
djm@6458 1197 **************************************************************************/
djm@6458 1198
awilliam@10236 1199 static void
awilliam@10236 1200 vcpu_force_tlb_miss(VCPU* vcpu, UINT64 ifa)
awilliam@10236 1201 {
awilliam@10236 1202 PSCB(vcpu, ifa) = ifa;
awilliam@10236 1203 PSCB(vcpu, itir) = vcpu_get_itir_on_fault(vcpu, ifa);
awilliam@10236 1204 vcpu_thash(current, ifa, &PSCB(current, iha));
awilliam@10236 1205 }
awilliam@10236 1206
awilliam@10236 1207 IA64FAULT vcpu_force_inst_miss(VCPU *vcpu, UINT64 ifa)
awilliam@10236 1208 {
awilliam@10236 1209 vcpu_force_tlb_miss(vcpu, ifa);
awilliam@10236 1210 return (vcpu_get_rr_ve(vcpu, ifa)? IA64_INST_TLB_VECTOR: IA64_ALT_INST_TLB_VECTOR);
awilliam@10236 1211 }
awilliam@10236 1212
djm@6458 1213 IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
djm@6458 1214 {
awilliam@10236 1215 vcpu_force_tlb_miss(vcpu, ifa);
awilliam@10236 1216 return (vcpu_get_rr_ve(vcpu, ifa)? IA64_DATA_TLB_VECTOR: IA64_ALT_DATA_TLB_VECTOR);
djm@6458 1217 }
djm@6458 1218
djm@6458 1219 IA64FAULT vcpu_rfi(VCPU *vcpu)
djm@6458 1220 {
djm@6458 1221 // TODO: Only allowed for current vcpu
djm@6458 1222 PSR psr;
djm@6458 1223 UINT64 int_enable, regspsr = 0;
djm@6458 1224 UINT64 ifs;
djm@6458 1225 REGS *regs = vcpu_regs(vcpu);
djm@6458 1226 extern void dorfirfi(void);
djm@6458 1227
djm@6458 1228 psr.i64 = PSCB(vcpu,ipsr);
djm@6458 1229 if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
djm@6458 1230 int_enable = psr.ia64_psr.i;
djm@6458 1231 if (psr.ia64_psr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
djm@6458 1232 if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
djm@6458 1233 else vcpu_set_metaphysical_mode(vcpu,TRUE);
djm@6458 1234 psr.ia64_psr.ic = 1; psr.ia64_psr.i = 1;
djm@6458 1235 psr.ia64_psr.dt = 1; psr.ia64_psr.rt = 1; psr.ia64_psr.it = 1;
djm@6458 1236 psr.ia64_psr.bn = 1;
djm@6458 1237 //psr.pk = 1; // checking pkeys shouldn't be a problem but seems broken
djm@6458 1238 if (psr.ia64_psr.be) {
djm@6458 1239 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
djm@6458 1240 return (IA64_ILLOP_FAULT);
djm@6458 1241 }
djm@6458 1242 PSCB(vcpu,incomplete_regframe) = 0; // is this necessary?
djm@6458 1243 ifs = PSCB(vcpu,ifs);
djm@6458 1244 //if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
djm@6458 1245 //if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
djm@6458 1246 if (ifs & regs->cr_ifs & 0x8000000000000000L) {
djm@6458 1247 // TODO: validate PSCB(vcpu,iip)
djm@6458 1248 // TODO: PSCB(vcpu,ipsr) = psr;
djm@6458 1249 PSCB(vcpu,ipsr) = psr.i64;
djm@6458 1250 // now set up the trampoline
djm@6458 1251 regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
djm@6458 1252 __asm__ __volatile ("mov %0=psr;;":"=r"(regspsr)::"memory");
djm@6458 1253 regs->cr_ipsr = regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
djm@6458 1254 }
djm@6458 1255 else {
djm@6458 1256 regs->cr_ipsr = psr.i64;
djm@6458 1257 regs->cr_iip = PSCB(vcpu,iip);
djm@6458 1258 }
djm@6458 1259 PSCB(vcpu,interrupt_collection_enabled) = 1;
djm@6458 1260 vcpu_bsw1(vcpu);
awilliam@9479 1261 vcpu->vcpu_info->evtchn_upcall_mask = !int_enable;
djm@6458 1262 return (IA64_NO_FAULT);
djm@6458 1263 }
djm@6458 1264
djm@6458 1265 IA64FAULT vcpu_cover(VCPU *vcpu)
djm@6458 1266 {
djm@6458 1267 // TODO: Only allowed for current vcpu
djm@6458 1268 REGS *regs = vcpu_regs(vcpu);
djm@6458 1269
djm@6458 1270 if (!PSCB(vcpu,interrupt_collection_enabled)) {
djm@6458 1271 if (!PSCB(vcpu,incomplete_regframe))
djm@6458 1272 PSCB(vcpu,ifs) = regs->cr_ifs;
djm@6458 1273 else PSCB(vcpu,incomplete_regframe) = 0;
djm@6458 1274 }
djm@6458 1275 regs->cr_ifs = 0;
djm@6458 1276 return (IA64_NO_FAULT);
djm@6458 1277 }
djm@6458 1278
djm@6458 1279 IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
djm@6458 1280 {
djm@6458 1281 UINT64 pta = PSCB(vcpu,pta);
djm@6458 1282 UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
djm@6458 1283 UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
djm@6458 1284 UINT64 Mask = (1L << pta_sz) - 1;
djm@6458 1285 UINT64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
djm@6458 1286 UINT64 compMask_60_15 = ~Mask_60_15;
djm@6458 1287 UINT64 rr_ps = vcpu_get_rr_ps(vcpu,vadr);
djm@6458 1288 UINT64 VHPT_offset = (vadr >> rr_ps) << 3;
djm@6458 1289 UINT64 VHPT_addr1 = vadr & 0xe000000000000000L;
djm@6458 1290 UINT64 VHPT_addr2a =
djm@6458 1291 ((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
djm@6458 1292 UINT64 VHPT_addr2b =
awilliam@9314 1293 ((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
djm@6458 1294 UINT64 VHPT_addr3 = VHPT_offset & 0x7fff;
djm@6458 1295 UINT64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
djm@6458 1296 VHPT_addr3;
djm@6458 1297
djm@6458 1298 //verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
djm@6458 1299 *pval = VHPT_addr;
djm@6458 1300 return (IA64_NO_FAULT);
djm@6458 1301 }
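
/* The hash above, restated in isolation so it can be unit-tested (a
 * sketch; vhpt_hash is hypothetical and assumes the same PTA/RR layout
 * as the code above):
 */
#if 0
static UINT64 vhpt_hash(UINT64 pta, UINT64 vadr, UINT64 rr_ps)
{
	UINT64 sz = (pta >> IA64_PTA_SZ_BIT) & 0x3f;
	UINT64 base = pta & ~((1UL << IA64_PTA_BASE_BIT) - 1);
	UINT64 m = (((1UL << sz) - 1) >> 15) & 0x3fffffffffffUL;
	UINT64 off = (vadr >> rr_ps) << 3;	/* 8 bytes per short-format PTE */
	return (vadr & 0xe000000000000000UL)	/* region bits from vadr */
	    | ((((base >> 15) & 0x3fffffffffffUL & ~m)	/* base above table size */
	       | ((off >> 15) & 0x3fffffffffffUL & m)) << 15)	/* hash below */
	    | (off & 0x7fff);
}
#endif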
djm@6458 1302
djm@6458 1303 IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
djm@6458 1304 {
djm@6458 1305 printf("vcpu_ttag: ttag instruction unsupported\n");
djm@6458 1306 return (IA64_ILLOP_FAULT);
djm@6458 1307 }
djm@6458 1308
djm@7727 1309 int warn_region0_address = 0; // FIXME later: tie to a boot parameter?
djm@7727 1310
awilliam@10246 1311 /* Return TRUE iff [b1,e1] and [b2,e2] partially or fully overlap. */
awilliam@10246 1312 static inline int range_overlap (u64 b1, u64 e1, u64 b2, u64 e2)
awilliam@10246 1313 {
awilliam@10246 1314 return (b1 <= e2) && (e1 >= b2);
awilliam@10246 1315 }
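
/* E.g. (bounds are inclusive): range_overlap(0x1000, 0x1fff, 0x2000,
 * 0x2fff) is 0 (merely adjacent), while range_overlap(0x1000, 0x1fff,
 * 0x1fff, 0x2fff) is 1: one shared byte suffices.
 */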
awilliam@10246 1316
awilliam@10436 1317 /* Crash the domain if [base, base + page_size] and the Xen virtual space overlap.
awilliam@10436 1318 Note: LSBs of base inside page_size are ignored. */
awilliam@10436 1319 static inline void
awilliam@10436 1320 check_xen_space_overlap (const char *func, u64 base, u64 page_size)
awilliam@10436 1321 {
awilliam@10436 1322 /* Mask LSBs of base. */
awilliam@10436 1323 base &= ~(page_size - 1);
awilliam@10436 1324
awilliam@10436 1325 /* FIXME: ideally an MCA should be generated... */
awilliam@10444 1326 if (range_overlap (HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END,
awilliam@10436 1327 base, base + page_size))
awilliam@10436 1328 panic_domain (NULL, "%s on Xen virtual space (%lx)\n",
awilliam@10436 1329 func, base);
awilliam@10436 1330 }
awilliam@10436 1331
awilliam@9483 1332 // FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
awilliam@9504 1333 static inline int vcpu_match_tr_entry_no_p(TR_ENTRY *trp, UINT64 ifa, UINT64 rid)
awilliam@9504 1334 {
awilliam@9504 1335 return trp->rid == rid
awilliam@9504 1336 && ifa >= trp->vadr
awilliam@9504 1337 && ifa <= (trp->vadr + (1L << trp->ps) - 1);
awilliam@9504 1338 }
awilliam@9504 1339
awilliam@9483 1340 static inline int vcpu_match_tr_entry(TR_ENTRY *trp, UINT64 ifa, UINT64 rid)
awilliam@9483 1341 {
awilliam@9504 1342 return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
awilliam@9483 1343 }
awilliam@9483 1344
awilliam@10246 1345 static inline int
awilliam@10246 1346 vcpu_match_tr_entry_range(TR_ENTRY *trp, UINT64 rid, u64 b, u64 e)
awilliam@10246 1347 {
awilliam@10246 1348 return trp->rid == rid
awilliam@10246 1349 && trp->pte.p
awilliam@10246 1350 && range_overlap (b, e,
awilliam@10246 1351 trp->vadr, trp->vadr + (1L << trp->ps) - 1);
awilliam@10246 1352
awilliam@10246 1353 }
awilliam@10246 1354
awilliam@10236 1355 static TR_ENTRY*
awilliam@10236 1356 vcpu_tr_lookup(VCPU* vcpu, unsigned long va, UINT64 rid, BOOLEAN is_data)
awilliam@10236 1357 {
awilliam@10570 1358 unsigned char* regions;
awilliam@10236 1359 TR_ENTRY *trp;
awilliam@10236 1360 int tr_max;
awilliam@10236 1361 int i;
awilliam@10236 1362
awilliam@10236 1363 if (is_data) {
awilliam@10236 1364 // data
awilliam@10236 1365 regions = &vcpu->arch.dtr_regions;
awilliam@10236 1366 trp = vcpu->arch.dtrs;
awilliam@10236 1367 tr_max = sizeof(vcpu->arch.dtrs)/sizeof(vcpu->arch.dtrs[0]);
awilliam@10236 1368 } else {
awilliam@10236 1369 // instruction
awilliam@10236 1370 regions = &vcpu->arch.itr_regions;
awilliam@10236 1371 trp = vcpu->arch.itrs;
awilliam@10236 1372 tr_max = sizeof(vcpu->arch.itrs)/sizeof(vcpu->arch.itrs[0]);
awilliam@10236 1373 }
awilliam@10236 1374
awilliam@10236 1375 if (!vcpu_quick_region_check(*regions, va)) {
awilliam@10236 1376 return NULL;
awilliam@10236 1377 }
awilliam@10236 1378 for (i = 0; i < tr_max; i++, trp++) {
awilliam@10236 1379 if (vcpu_match_tr_entry(trp, va, rid)) {
awilliam@10236 1380 return trp;
awilliam@10236 1381 }
awilliam@10236 1382 }
awilliam@10236 1383 return NULL;
awilliam@10236 1384 }
awilliam@10236 1385
awilliam@10236 1386 // return value
awilliam@10236 1387 // 0: failure
awilliam@10236 1388 // 1: success
awilliam@10236 1389 int
awilliam@10236 1390 vcpu_get_domain_bundle(VCPU* vcpu, REGS* regs, UINT64 gip, IA64_BUNDLE* bundle)
awilliam@10236 1391 {
awilliam@10236 1392 UINT64 gpip; // guest pseudo-physical ip
awilliam@10424 1393 unsigned long vaddr;
awilliam@10424 1394 struct page_info* page;
awilliam@10236 1395
awilliam@10424 1396 again:
awilliam@10236 1397 #if 0
awilliam@10236 1398 // Currently xen doesn't track the psr.it bit;
awilliam@10236 1399 // it assumes psr.it is always 1.
awilliam@10236 1400 if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
awilliam@10236 1401 gpip = gip;
awilliam@10236 1402 } else
awilliam@10236 1403 #endif
awilliam@10236 1404 {
awilliam@10236 1405 unsigned long region = REGION_NUMBER(gip);
awilliam@10236 1406 unsigned long rr = PSCB(vcpu, rrs)[region];
awilliam@10236 1407 unsigned long rid = rr & RR_RID_MASK;
awilliam@10236 1408 BOOLEAN swap_rr0;
awilliam@10236 1409 TR_ENTRY* trp;
awilliam@10236 1410
awilliam@10236 1411 // vcpu->arch.{i, d}tlb are volatile,
awilliam@10236 1412 // so copy the value into the local variable tr before use.
awilliam@10236 1413 TR_ENTRY tr;
awilliam@10236 1414
awilliam@10236 1415 trp = vcpu_tr_lookup(vcpu, gip, rid, 0);
awilliam@10236 1416 if (trp != NULL) {
awilliam@10236 1417 tr = *trp;
awilliam@10236 1418 goto found;
awilliam@10236 1419 }
awilliam@10236 1420 // When fetching a bundle fails, an itlb miss is reflected to the guest.
awilliam@10236 1421 // The last itc.i value is cached in PSCBX(vcpu, itlb).
awilliam@10236 1422 tr = PSCBX(vcpu, itlb);
awilliam@10236 1423 if (vcpu_match_tr_entry(&tr, gip, rid)) {
awilliam@10236 1424 //DPRINTK("%s gip 0x%lx gpip 0x%lx\n", __func__, gip, gpip);
awilliam@10236 1425 goto found;
awilliam@10236 1426 }
awilliam@10236 1427 trp = vcpu_tr_lookup(vcpu, gip, rid, 1);
awilliam@10236 1428 if (trp != NULL) {
awilliam@10236 1429 tr = *trp;
awilliam@10236 1430 goto found;
awilliam@10236 1431 }
awilliam@10236 1432 #if 0
awilliam@10236 1433 tr = PSCBX(vcpu, dtlb);
awilliam@10236 1434 if (vcpu_match_tr_entry(&tr, gip, rid)) {
awilliam@10236 1435 goto found;
awilliam@10236 1436 }
awilliam@10236 1437 #endif
awilliam@10236 1438
awilliam@10236 1439 // Try to access gip through the guest virtual address.
awilliam@10236 1440 // This may cause a tlb miss; see vcpu_translate(). Be careful!
awilliam@10236 1441 swap_rr0 = (!region && PSCB(vcpu, metaphysical_mode));
awilliam@10236 1442 if (swap_rr0) {
awilliam@10236 1443 set_one_rr(0x0, PSCB(vcpu, rrs[0]));
awilliam@10236 1444 }
awilliam@10236 1445 *bundle = __get_domain_bundle(gip);
awilliam@10236 1446 if (swap_rr0) {
awilliam@10236 1447 set_metaphysical_rr0();
awilliam@10236 1448 }
awilliam@10236 1449 if (bundle->i64[0] == 0 && bundle->i64[1] == 0) {
awilliam@10236 1450 DPRINTK("%s gip 0x%lx\n", __func__, gip);
awilliam@10236 1451 return 0;
awilliam@10236 1452 }
awilliam@10236 1453 return 1;
awilliam@10236 1454
awilliam@10236 1455 found:
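		/* tr.pte.ppn is a frame number in 4KB granules; shifting
		 * it right by (ps - 12) and back left by ps yields the
		 * ps-aligned frame base, to which gip's page offset is
		 * added below. */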
awilliam@10236 1456 gpip = ((tr.pte.ppn >> (tr.ps - 12)) << tr.ps) |
awilliam@10236 1457 (gip & ((1 << tr.ps) - 1));
awilliam@10236 1458 }
awilliam@10424 1459
awilliam@10436 1460 vaddr = (unsigned long)domain_mpa_to_imva(vcpu->domain, gpip);
awilliam@10424 1461 page = virt_to_page(vaddr);
awilliam@10424 1462 if (get_page(page, vcpu->domain) == 0) {
awilliam@10424 1463 if (page_get_owner(page) != vcpu->domain) {
awilliam@10424 1464 // This page might be a page granted by another
awilliam@10424 1465 // domain.
awilliam@10424 1466 panic_domain(regs,
awilliam@10424 1467 "domain tries to execute foreign domain "
awilliam@10424 1468 "page which might be mapped by grant "
awilliam@10424 1469 "table.\n");
awilliam@10424 1470 }
awilliam@10424 1471 goto again;
awilliam@10424 1472 }
awilliam@10424 1473 *bundle = *((IA64_BUNDLE*)vaddr);
awilliam@10424 1474 put_page(page);
awilliam@10236 1475 return 1;
awilliam@10236 1476 }
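/* A caller sketch (hypothetical names, for illustration only): an
 * emulation path that needs the faulting bundle would use the 0/1
 * return value above to decide whether it can proceed:
 *
 *	IA64_BUNDLE bundle;
 *	if (!vcpu_get_domain_bundle(vcpu, regs, regs->cr_iip, &bundle))
 *		return;		// bundle unavailable; cannot emulate
 *	// decode bundle.i64[0] / bundle.i64[1] ...
 */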
awilliam@10236 1477
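/* Translation search order below: the metaphysical fast path, then the
 * architected TRs (vcpu_tr_lookup), then the 1-entry virtual TLB
 * (vcpu->arch.dtlb), and finally the guest's short-format VHPT.  The
 * return value either tells the caller to use *pteval (IA64_NO_FAULT,
 * IA64_USE_TLB) or names the fault vector to reflect. */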
awilliam@9860 1478 IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
djm@6458 1479 {
djm@7731 1480 unsigned long region = address >> 61;
awilliam@9504 1481 unsigned long pta, rid, rr;
awilliam@9504 1482 union pte_flags pte;
djm@6458 1483 TR_ENTRY *trp;
djm@6458 1484
djm@7731 1485 if (PSCB(vcpu,metaphysical_mode) && !(!is_data && region)) {
djm@7727 1486 // dom0 may generate an uncacheable physical address (msb=1)
djm@7727 1487 if (region && ((region != 4) || (vcpu->domain != dom0))) {
djm@7727 1488 // FIXME: This seems to happen even though it shouldn't.  Need to track
djm@7727 1489 // this down, but since it appears to be harmless, just flag it for now
djm@7727 1490 // panic_domain(vcpu_regs(vcpu),
awilliam@9279 1491
awilliam@9279 1492 /*
awilliam@9279 1493 * The guest may execute itc.d and rfi with psr.dt=0.
awilliam@9279 1494 * When the VMM then tries to fetch the opcode, a tlb miss
awilliam@9279 1495 * may happen.  At that point PSCB(vcpu,metaphysical_mode)=1
awilliam@9279 1496 * and region=5, and the VMM needs to handle the tlb miss as
awilliam@9279 1497 * if PSCB(vcpu,metaphysical_mode)=0.
awilliam@9279 1498 */
awilliam@10246 1499 printk("vcpu_translate: bad physical address: 0x%lx at %lx\n",
awilliam@10246 1500 address, vcpu_regs (vcpu)->cr_iip);
awilliam@9504 1501
awilliam@9279 1502 } else {
awilliam@9279 1503 *pteval = (address & _PAGE_PPN_MASK) | __DIRTY_BITS |
awilliam@9279 1504 _PAGE_PL_2 | _PAGE_AR_RWX;
awilliam@9279 1505 *itir = PAGE_SHIFT << 2;
awilliam@9279 1506 phys_translate_count++;
awilliam@9279 1507 return IA64_NO_FAULT;
djm@7667 1508 }
djm@6458 1509 }
djm@7731 1510 else if (!region && warn_region0_address) {
djm@7727 1511 REGS *regs = vcpu_regs(vcpu);
djm@7727 1512 unsigned long viip = PSCB(vcpu,iip);
djm@7727 1513 unsigned long vipsr = PSCB(vcpu,ipsr);
djm@7727 1514 unsigned long iip = regs->cr_iip;
djm@7727 1515 unsigned long ipsr = regs->cr_ipsr;
awilliam@9005 1516 printk("vcpu_translate: bad address 0x%lx, viip=0x%lx, vipsr=0x%lx, iip=0x%lx, ipsr=0x%lx continuing\n",
awilliam@9005 1517 address, viip, vipsr, iip, ipsr);
djm@7727 1518 }
djm@7338 1519
djm@7731 1520 rr = PSCB(vcpu,rrs)[region];
djm@7338 1521 rid = rr & RR_RID_MASK;
djm@7335 1522 if (is_data) {
awilliam@10236 1523 trp = vcpu_tr_lookup(vcpu, address, rid, 1);
awilliam@10236 1524 if (trp != NULL) {
awilliam@10236 1525 *pteval = trp->pte.val;
awilliam@10236 1526 *itir = trp->itir;
awilliam@10236 1527 tr_translate_count++;
awilliam@10236 1528 return IA64_NO_FAULT;
djm@7335 1529 }
djm@7335 1530 }
djm@7335 1531 // FIXME?: check itr's for data accesses too, else bad things happen?
djm@7335 1532 /* else */ {
awilliam@10236 1533 trp = vcpu_tr_lookup(vcpu, address, rid, 0);
awilliam@10236 1534 if (trp != NULL) {
awilliam@10236 1535 *pteval = trp->pte.val;
awilliam@10236 1536 *itir = trp->itir;
awilliam@10236 1537 tr_translate_count++;
awilliam@10236 1538 return IA64_NO_FAULT;
djm@7335 1539 }
djm@6458 1540 }
djm@6458 1541
djm@6458 1542 /* check 1-entry TLB */
djm@7335 1543 // FIXME?: check dtlb for inst accesses too, else bad things happen?
djm@7335 1544 trp = &vcpu->arch.dtlb;
awilliam@9504 1545 pte = trp->pte;
awilliam@9504 1546 if (/* is_data && */ pte.p
awilliam@9504 1547 && vcpu_match_tr_entry_no_p(trp,address,rid)) {
awilliam@9860 1548 *pteval = pte.val;
djm@6458 1549 *itir = trp->itir;
djm@7335 1550 dtlb_translate_count++;
awilliam@9504 1551 return IA64_USE_TLB;
djm@6458 1552 }
djm@6458 1553
djm@6458 1554 /* check guest VHPT */
djm@6458 1555 pta = PSCB(vcpu,pta);
djm@7065 1556 if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */
djm@7065 1557 panic_domain(vcpu_regs(vcpu),"can't do long format VHPT\n");
djm@7065 1558 //return (is_data ? IA64_DATA_TLB_VECTOR:IA64_INST_TLB_VECTOR);
djm@7065 1559 }
djm@6458 1560
djm@7338 1561 *itir = rr & (RR_RID_MASK | RR_PS_MASK);
djm@7338 1562 // note: architecturally, iha is optionally set for alt faults, but
djm@7338 1563 // xenlinux depends on it, so it should be documented as part of the PV interface
djm@7065 1564 vcpu_thash(vcpu, address, iha);
djm@7338 1565 if (!(rr & RR_VE_MASK) || !(pta & IA64_PTA_VE))
djm@7338 1566 return (is_data ? IA64_ALT_DATA_TLB_VECTOR : IA64_ALT_INST_TLB_VECTOR);
djm@7065 1567
djm@7335 1568 /* avoid recursively walking (short format) VHPT */
djm@7338 1569 if (((address ^ pta) & ((itir_mask(pta) << 3) >> 3)) == 0)
djm@7338 1570 return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
djm@7335 1571
awilliam@9504 1572 if (!__access_ok (*iha)
awilliam@9504 1573 || __copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0)
djm@7338 1574 // virtual VHPT walker "missed" in TLB
djm@7338 1575 return IA64_VHPT_FAULT;
djm@7335 1576
djm@7338 1577 /*
djm@7338 1578 * Optimisation: this VHPT walker aborts on not-present pages
djm@7338 1579 * instead of inserting a not-present translation; this allows
djm@7338 1580 * vectoring directly to the miss handler.
djm@7338 1581 */
awilliam@9504 1582 if (!pte.p)
djm@7338 1583 return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
djm@7338 1584
djm@7338 1585 /* found mapping in guest VHPT! */
djm@7338 1586 *itir = rr & RR_PS_MASK;
awilliam@9504 1587 *pteval = pte.val;
djm@7338 1588 vhpt_translate_count++;
djm@7338 1589 return IA64_NO_FAULT;
djm@6458 1590 }
djm@6458 1591
djm@6458 1592 IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
djm@6458 1593 {
djm@7065 1594 UINT64 pteval, itir, mask, iha;
djm@6458 1595 IA64FAULT fault;
djm@6458 1596
awilliam@9860 1597 fault = vcpu_translate(vcpu, vadr, TRUE, &pteval, &itir, &iha);
awilliam@9504 1598 if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB)
djm@6458 1599 {
djm@6458 1600 mask = itir_mask(itir);
djm@6458 1601 *padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
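		/* e.g. with a 64KB mapping (ps = 16 in itir), mask keeps
		 * bits 63:16, so padr is the pte's frame base plus the
		 * low 16 offset bits of vadr. */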
djm@6458 1602 return (IA64_NO_FAULT);
djm@6458 1603 }
djm@7142 1604 return vcpu_force_data_miss(vcpu,vadr);
djm@6458 1605 }
djm@6458 1606
djm@6458 1607 IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
djm@6458 1608 {
djm@6458 1609 printf("vcpu_tak: tak instruction unsupported\n");
djm@6458 1610 return (IA64_ILLOP_FAULT);
djm@6458 1611 // HACK ALERT: tak does a thash for now
djm@6458 1612 //return vcpu_thash(vcpu,vadr,key);
djm@6458 1613 }
djm@6458 1614
djm@6458 1615 /**************************************************************************
djm@6458 1616 VCPU debug breakpoint register access routines
djm@6458 1617 **************************************************************************/
djm@6458 1618
djm@6458 1619 IA64FAULT vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
djm@6458 1620 {
djm@6458 1621 // TODO: unimplemented DBRs return a reserved register fault
djm@6458 1622 // TODO: Should set Logical CPU state, not just physical
djm@6458 1623 ia64_set_dbr(reg,val);
djm@6458 1624 return (IA64_NO_FAULT);
djm@6458 1625 }
djm@6458 1626
djm@6458 1627 IA64FAULT vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
djm@6458 1628 {
djm@6458 1629 // TODO: unimplemented IBRs return a reserved register fault
djm@6458 1630 // TODO: Should set Logical CPU state, not just physical
djm@6458 1631 ia64_set_ibr(reg,val);
djm@6458 1632 return (IA64_NO_FAULT);
djm@6458 1633 }
djm@6458 1634
djm@6458 1635 IA64FAULT vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
djm@6458 1636 {
djm@6458 1637 // TODO: unimplemented DBRs return a reserved register fault
djm@6458 1638 UINT64 val = ia64_get_dbr(reg);
djm@6458 1639 *pval = val;
djm@6458 1640 return (IA64_NO_FAULT);
djm@6458 1641 }
djm@6458 1642
djm@6458 1643 IA64FAULT vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
djm@6458 1644 {
djm@6458 1645 // TODO: unimplemented IBRs return a reserved register fault
djm@6458 1646 UINT64 val = ia64_get_ibr(reg);
djm@6458 1647 *pval = val;
djm@6458 1648 return (IA64_NO_FAULT);
djm@6458 1649 }
djm@6458 1650
djm@6458 1651 /**************************************************************************
djm@6458 1652 VCPU performance monitor register access routines
djm@6458 1653 **************************************************************************/
djm@6458 1654
djm@6458 1655 IA64FAULT vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
djm@6458 1656 {
djm@6458 1657 // TODO: Should set Logical CPU state, not just physical
djm@6458 1658 // NOTE: Writes to unimplemented PMC registers are discarded
djm@6866 1659 #ifdef DEBUG_PFMON
djm@6866 1660 printf("vcpu_set_pmc(%x,%lx)\n",reg,val);
djm@6866 1661 #endif
djm@6458 1662 ia64_set_pmc(reg,val);
djm@6458 1663 return (IA64_NO_FAULT);
djm@6458 1664 }
djm@6458 1665
djm@6458 1666 IA64FAULT vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
djm@6458 1667 {
djm@6458 1668 // TODO: Should set Logical CPU state, not just physical
djm@6458 1669 // NOTE: Writes to unimplemented PMD registers are discarded
djm@6866 1670 #ifdef DEBUG_PFMON
djm@6866 1671 printf("vcpu_set_pmd(%x,%lx)\n",reg,val);
djm@6866 1672 #endif
djm@6458 1673 ia64_set_pmd(reg,val);
djm@6458 1674 return (IA64_NO_FAULT);
djm@6458 1675 }
djm@6458 1676
djm@6458 1677 IA64FAULT vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
djm@6458 1678 {
djm@6458 1679 // NOTE: Reads from unimplemented PMC registers return zero
djm@6458 1680 UINT64 val = (UINT64)ia64_get_pmc(reg);
djm@6866 1681 #ifdef DEBUG_PFMON
djm@6866 1682 printf("%lx=vcpu_get_pmc(%x)\n",val,reg);
djm@6866 1683 #endif
djm@6458 1684 *pval = val;
djm@6458 1685 return (IA64_NO_FAULT);
djm@6458 1686 }
djm@6458 1687
djm@6458 1688 IA64FAULT vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
djm@6458 1689 {
djm@6458 1690 // NOTE: Reads from unimplemented PMD registers return zero
djm@6458 1691 UINT64 val = (UINT64)ia64_get_pmd(reg);
djm@6866 1692 #ifdef DEBUG_PFMON
djm@6866 1693 printf("%lx=vcpu_get_pmd(%x)\n",val,reg);
djm@6866 1694 #endif
djm@6458 1695 *pval = val;
djm@6458 1696 return (IA64_NO_FAULT);
djm@6458 1697 }
djm@6458 1698
djm@6458 1699 /**************************************************************************
djm@6458 1700 VCPU banked general register access routines
djm@6458 1701 **************************************************************************/
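/* The bsw0/bsw1 routines swap the live r16-r31 with the shadowed bank
 * registers; the *_unat macros below do the same for the 16 matching
 * NaT bits.  Roughly: extract the live NaT bits for r16-r31 from
 * regs->eml_unat (at IA64_PT_REGS_R16_SLOT), stash them in the outgoing
 * bank's shadow (PSCB vbnat/vnat), and deposit the incoming bank's
 * saved NaT bits back into eml_unat, each update done with a single
 * st8 store. */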
djm@6867 1702 #define vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
djm@6867 1703 do{ \
djm@6867 1704 __asm__ __volatile__ ( \
djm@6867 1705 ";;extr.u %0 = %3,%6,16;;\n" \
djm@6867 1706 "dep %1 = %0, %1, 0, 16;;\n" \
djm@6867 1707 "st8 [%4] = %1\n" \
djm@6867 1708 "extr.u %0 = %2, 16, 16;;\n" \
djm@6867 1709 "dep %3 = %0, %3, %6, 16;;\n" \
djm@6867 1710 "st8 [%5] = %3\n" \
djm@6867 1711 ::"r"(i),"r"(*b1unat),"r"(*b0unat),"r"(*runat),"r"(b1unat), \
djm@6867 1712 "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
djm@6867 1713 }while(0)
djm@6458 1714
djm@6458 1715 IA64FAULT vcpu_bsw0(VCPU *vcpu)
djm@6458 1716 {
djm@6458 1717 // TODO: Only allowed for current vcpu
djm@6458 1718 REGS *regs = vcpu_regs(vcpu);
djm@6458 1719 unsigned long *r = &regs->r16;
djm@6458 1720 unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
djm@6458 1721 unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
djm@6867 1722 unsigned long *runat = &regs->eml_unat;
djm@6867 1723 unsigned long *b0unat = &PSCB(vcpu,vbnat);
djm@6867 1724 unsigned long *b1unat = &PSCB(vcpu,vnat);
djm@6458 1725
djm@6867 1726 unsigned long i;
djm@6867 1727
djm@6867 1728 if(VMX_DOMAIN(vcpu)){
djm@6867 1729 if(VCPU(vcpu,vpsr)&IA64_PSR_BN){
djm@6867 1730 for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
djm@6867 1731 vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
djm@6867 1732 VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
djm@6867 1733 }
djm@6867 1734 }else{
djm@6867 1735 if (PSCB(vcpu,banknum)) {
djm@6867 1736 for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
djm@6867 1737 vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
djm@6867 1738 PSCB(vcpu,banknum) = 0;
djm@6867 1739 }
djm@6867 1740 }
djm@6458 1741 return (IA64_NO_FAULT);
djm@6458 1742 }
djm@6458 1743
djm@6867 1744 #define vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
djm@6867 1745 do{ \
djm@6867 1746 __asm__ __volatile__ ( \
djm@6867 1747 ";;extr.u %0 = %3,%6,16;;\n" \
djm@6867 1748 "dep %1 = %0, %1, 16, 16;;\n" \
djm@6867 1749 "st8 [%4] = %1\n" \
djm@6867 1750 "extr.u %0 = %2, 0, 16;;\n" \
djm@6867 1751 "dep %3 = %0, %3, %6, 16;;\n" \
djm@6867 1752 "st8 [%5] = %3\n" \
djm@6867 1753 ::"r"(i),"r"(*b0unat),"r"(*b1unat),"r"(*runat),"r"(b0unat), \
djm@6867 1754 "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
djm@6867 1755 }while(0)
djm@6867 1756
djm@6458 1757 IA64FAULT vcpu_bsw1(VCPU *vcpu)
djm@6458 1758 {
djm@6458 1759 // TODO: Only allowed for current vcpu
djm@6458 1760 REGS *regs = vcpu_regs(vcpu);
djm@6458 1761 unsigned long *r = &regs->r16;
djm@6458 1762 unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
djm@6458 1763 unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
djm@6867 1764 unsigned long *runat = &regs->eml_unat;
djm@6867 1765 unsigned long *b0unat = &PSCB(vcpu,vbnat);
djm@6867 1766 unsigned long *b1unat = &PSCB(vcpu,vnat);
djm@6458 1767
djm@6867 1768 unsigned long i;
djm@6867 1769
djm@6867 1770 if(VMX_DOMAIN(vcpu)){
djm@6867 1771 if(!(VCPU(vcpu,vpsr)&IA64_PSR_BN)){
djm@6867 1772 for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
djm@6867 1773 vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
djm@6867 1774 VCPU(vcpu,vpsr) |= IA64_PSR_BN;
djm@6867 1775 }
djm@6867 1776 }else{
djm@6867 1777 if (!PSCB(vcpu,banknum)) {
djm@6867 1778 for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
djm@6867 1779 vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
djm@6867 1780 PSCB(vcpu,banknum) = 1;
djm@6867 1781 }
djm@6867 1782 }
djm@6458 1783 return (IA64_NO_FAULT);
djm@6458 1784 }
djm@6458 1785
djm@6458 1786 /**************************************************************************
djm@6458 1787 VCPU cpuid access routines
djm@6458 1788 **************************************************************************/
djm@6458 1789
djm@6458 1790
djm@6458 1791 IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
djm@6458 1792 {
djm@6458 1793 // FIXME: This could get called as a result of a rsvd-reg fault
djm@6458 1794 // if reg > 3
djm@6458 1795 switch(reg) {
djm@6458 1796 case 0:
djm@6458 1797 memcpy(pval,"Xen/ia64",8);
djm@6458 1798 break;
djm@6458 1799 case 1:
djm@6458 1800 *pval = 0;
djm@6458 1801 break;
djm@6458 1802 case 2:
djm@6458 1803 *pval = 0;
djm@6458 1804 break;
djm@6458 1805 case 3:
djm@6458 1806 *pval = ia64_get_cpuid(3);
djm@6458 1807 break;
djm@6458 1808 case 4:
djm@6458 1809 *pval = ia64_get_cpuid(4);
djm@6458 1810 break;
djm@6458 1811 default:
djm@6458 1812 if (reg > (ia64_get_cpuid(3) & 0xff))
djm@6458 1813 return IA64_RSVDREG_FAULT;
djm@6458 1814 *pval = ia64_get_cpuid(reg);
djm@6458 1815 break;
djm@6458 1816 }
djm@6458 1817 return (IA64_NO_FAULT);
djm@6458 1818 }
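/* Illustrative only (hypothetical snippet): a guest assembling the
 * vendor information from cpuid registers 0-1 would see "Xen/ia64":
 *
 *	char vendor[17];
 *	UINT64 v[2];
 *	vcpu_get_cpuid(vcpu, 0, &v[0]);	// "Xen/ia64"
 *	vcpu_get_cpuid(vcpu, 1, &v[1]);	// 0
 *	memcpy(vendor, v, 16);
 *	vendor[16] = '\0';
 */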
djm@6458 1819
djm@6458 1820 /**************************************************************************
djm@6458 1821 VCPU region register access routines
djm@6458 1822 **************************************************************************/
djm@6458 1823
djm@6458 1824 unsigned long vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr)
djm@6458 1825 {
djm@6458 1826 ia64_rr rr;
djm@6458 1827
djm@6458 1828 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
djm@6458 1829 return(rr.ve);
djm@6458 1830 }
djm@6458 1831
djm@6458 1832 IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
djm@6458 1833 {
djm@6458 1834 PSCB(vcpu,rrs)[reg>>61] = val;
djm@6458 1835 // warning: set_one_rr() does it "live"
djm@6458 1836 set_one_rr(reg,val);
djm@6458 1837 return (IA64_NO_FAULT);
djm@6458 1838 }
djm@6458 1839
djm@6458 1840 IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
djm@6458 1841 {
awilliam@9164 1842 if(VMX_DOMAIN(vcpu)){
awilliam@9164 1843 *pval = VMX(vcpu,vrr[reg>>61]);
awilliam@9164 1844 }else{
awilliam@9164 1845 *pval = PSCB(vcpu,rrs)[reg>>61];
awilliam@9164 1846 }
djm@6458 1847 return (IA64_NO_FAULT);
djm@6458 1848 }
djm@6458 1849
djm@6458 1850 /**************************************************************************
djm@6458 1851 VCPU protection key register access routines
djm@6458 1852 **************************************************************************/
djm@6458 1853
djm@6458 1854 IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
djm@6458 1855 {
djm@6458 1856 #ifndef PKR_USE_FIXED
djm@6458 1857 printk("vcpu_get_pkr: called, not implemented yet\n");
djm@6458 1858 return IA64_ILLOP_FAULT;
djm@6458 1859 #else
djm@6458 1860 UINT64 val = (UINT64)ia64_get_pkr(reg);
djm@6458 1861 *pval = val;
djm@6458 1862 return (IA64_NO_FAULT);
djm@6458 1863 #endif
djm@6458 1864 }
djm@6458 1865
djm@6458 1866 IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
djm@6458 1867 {
djm@6458 1868 #ifndef PKR_USE_FIXED
djm@6458 1869 printk("vcpu_set_pkr: called, not implemented yet\n");
djm@6458 1870 return IA64_ILLOP_FAULT;
djm@6458 1871 #else
djm@6458 1872 // if (reg >= NPKRS) return (IA64_ILLOP_FAULT);
djm@6458 1873 vcpu->pkrs[reg] = val;
djm@6458 1874 ia64_set_pkr(reg,val);
djm@6458 1875 return (IA64_NO_FAULT);
djm@6458 1876 #endif
djm@6458 1877 }
djm@6458 1878
djm@6458 1879 /**************************************************************************
djm@6458 1880 VCPU translation register access routines
djm@6458 1881 **************************************************************************/
djm@6458 1882
awilliam@10692 1883 static void
awilliam@10692 1884 vcpu_set_tr_entry_rid(TR_ENTRY *trp, UINT64 pte,
awilliam@10692 1885 UINT64 itir, UINT64 ifa, UINT64 rid)
djm@6458 1886 {
djm@6458 1887 UINT64 ps;
awilliam@9504 1888 union pte_flags new_pte;
djm@6458 1889
djm@6458 1890 trp->itir = itir;
awilliam@10692 1891 trp->rid = rid;
djm@6458 1892 ps = trp->ps;
awilliam@9504 1893 new_pte.val = pte;
awilliam@9504 1894 if (new_pte.pl < 2) new_pte.pl = 2;
djm@6458 1895 trp->vadr = ifa & ~0xfff;
djm@6458 1896 if (ps > 12) { // "ignore" relevant low-order bits
awilliam@9504 1897 new_pte.ppn &= ~((1UL<<(ps-12))-1);
djm@6458 1898 trp->vadr &= ~((1UL<<ps)-1);
djm@6458 1899 }
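	/* e.g. for ps = 16 (64KB) this clears the low 4 bits of ppn
	 * (4KB granules) and the low 16 bits of vadr, aligning both
	 * to the mapping size. */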
awilliam@9504 1900
awilliam@9504 1901 /* Atomic write. */
awilliam@9504 1902 trp->pte.val = new_pte.val;
djm@6458 1903 }
djm@6458 1904
awilliam@10692 1905 static inline void
awilliam@10692 1906 vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
awilliam@10692 1907 {
awilliam@10692 1908 vcpu_set_tr_entry_rid(trp, pte, itir, ifa,
awilliam@10692 1909 VCPU(current, rrs[ifa>>61]) & RR_RID_MASK);
awilliam@10692 1910 }
awilliam@10692 1911
djm@6458 1912 IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
awilliam@10692 1913 UINT64 itir, UINT64 ifa)
djm@6458 1914 {
djm@6458 1915 TR_ENTRY *trp;
djm@6458 1916
djm@6458 1917 if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
awilliam@10688 1918
awilliam@10688 1919 vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
awilliam@10688 1920
djm@6458 1921 trp = &PSCBX(vcpu,dtrs[slot]);
djm@6458 1922 //printf("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
djm@6458 1923 vcpu_set_tr_entry(trp,pte,itir,ifa);
djm@7335 1924 vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),ifa);
awilliam@10688 1925
awilliam@10688 1926 vcpu_flush_tlb_vhpt_range(ifa & itir_mask(itir), itir_ps(itir));
awilliam@10688 1927
djm@6458 1928 return IA64_NO_FAULT;
djm@6458 1929 }
djm@6458 1930
djm@6458 1931 IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte,
awilliam@10692 1932 UINT64 itir, UINT64 ifa)
djm@6458 1933 {
djm@6458 1934 TR_ENTRY *trp;
djm@6458 1935
djm@6458 1936 if (slot >= NITRS) return IA64_RSVDREG_FAULT;
awilliam@10688 1937
awilliam@10688 1938 vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
awilliam@10688 1939
djm@6458 1940 trp = &PSCBX(vcpu,itrs[slot]);
djm@6458 1941 //printf("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
djm@6458 1942 vcpu_set_tr_entry(trp,pte,itir,ifa);
djm@7335 1943 vcpu_quick_region_set(PSCBX(vcpu,itr_regions),ifa);
awilliam@10688 1944
awilliam@10688 1945 vcpu_flush_tlb_vhpt_range(ifa & itir_mask(itir), itir_ps(itir));
awilliam@10688 1946
djm@6458 1947 return IA64_NO_FAULT;
djm@6458 1948 }
djm@6458 1949
awilliam@10692 1950 IA64FAULT vcpu_set_itr(VCPU *vcpu, u64 slot, u64 pte,
awilliam@10692 1951 u64 itir, u64 ifa, u64 rid)
awilliam@10692 1952 {
awilliam@10692 1953 TR_ENTRY *trp;
awilliam@10692 1954
awilliam@10692 1955 if (slot >= NITRS)
awilliam@10692 1956 return IA64_RSVDREG_FAULT;
awilliam@10692 1957 trp = &PSCBX(vcpu, itrs[slot]);
awilliam@10692 1958 vcpu_set_tr_entry_rid(trp, pte, itir, ifa, rid);
awilliam@10692 1959
awilliam@10692 1960 /* Recompute the itr_region. */
awilliam@10692 1961 vcpu->arch.itr_regions = 0;
awilliam@10692 1962 for (trp = vcpu->arch.itrs; trp < &vcpu->arch.itrs[NITRS]; trp++)
awilliam@10692 1963 if (trp->pte.p)
awilliam@10692 1964 vcpu_quick_region_set(vcpu->arch.itr_regions,
awilliam@10692 1965 trp->vadr);
awilliam@10692 1966 return IA64_NO_FAULT;
awilliam@10692 1967 }
awilliam@10692 1968
awilliam@10692 1969 IA64FAULT vcpu_set_dtr(VCPU *vcpu, u64 slot, u64 pte,
awilliam@10692 1970 u64 itir, u64 ifa, u64 rid)
awilliam@10692 1971 {
awilliam@10692 1972 TR_ENTRY *trp;
awilliam@10692 1973
awilliam@10692 1974 if (slot >= NDTRS)
awilliam@10692 1975 return IA64_RSVDREG_FAULT;
awilliam@10692 1976 trp = &PSCBX(vcpu, dtrs[slot]);
awilliam@10692 1977 vcpu_set_tr_entry_rid(trp, pte, itir, ifa, rid);
awilliam@10692 1978
awilliam@10692 1979 /* Recompute the dtr_region. */
awilliam@10692 1980 vcpu->arch.dtr_regions = 0;
awilliam@10692 1981 for (trp = vcpu->arch.dtrs; trp < &vcpu->arch.dtrs[NDTRS]; trp++)
awilliam@10692 1982 if (trp->pte.p)
awilliam@10692 1983 vcpu_quick_region_set(vcpu->arch.dtr_regions,
awilliam@10692 1984 trp->vadr);
awilliam@10692 1985 return IA64_NO_FAULT;
awilliam@10692 1986 }
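/* Unlike the itr.i/itr.d emulation above, vcpu_set_itr/vcpu_set_dtr
 * take the rid explicitly and recompute the region bitmap from scratch;
 * this is presumably what the save/restore path needs, since TRs must
 * be reinstalled with the rid that was in effect when they were saved. */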
awilliam@10692 1987
djm@6458 1988 /**************************************************************************
djm@6458 1989 VCPU translation cache access routines
djm@6458 1990 **************************************************************************/
djm@6458 1991
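/* IorD below is a flag word: bit 0 requests an instruction insert
 * (itc.i plus the itlb shadow), bit 1 a data insert (itc.d plus the
 * dtlb shadow), and bit 2 (like an mp_pte of -1) suppresses the
 * 1-entry TLB update altogether. */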
djm@6458 1992 void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 mp_pte, UINT64 logps)
djm@6458 1993 {
djm@6458 1994 unsigned long psr;
djm@6458 1995 unsigned long ps = (vcpu->domain==dom0) ? logps : PAGE_SHIFT;
djm@6458 1996
awilliam@10436 1997 check_xen_space_overlap ("itc", vaddr, 1UL << logps);
awilliam@10436 1998
djm@6458 1999 // FIXME: must be inlined or there is potential for a nested fault here!
awilliam@10436 2000 if ((vcpu->domain==dom0) && (logps < PAGE_SHIFT))
awilliam@10436 2001 panic_domain (NULL, "vcpu_itc_no_srlz: domain trying to use "
awilliam@10436 2002 "smaller page size!\n");
awilliam@10436 2003
awilliam@9756 2004 #ifdef CONFIG_XEN_IA64_DOM0_VP
awilliam@9756 2005 BUG_ON(logps > PAGE_SHIFT);
awilliam@9756 2006 #endif
djm@6458 2007 psr = ia64_clear_ic();
djm@6458 2008 ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
djm@6458 2009 ia64_set_psr(psr);
djm@6458 2010 // ia64_srlz_i(); // no srls req'd, will rfi later
djm@6458 2011 #ifdef VHPT_GLOBAL
djm@6458 2012 if (vcpu->domain==dom0 && ((vaddr >> 61) == 7)) {
djm@6458 2013 // FIXME: this is dangerous... vhpt_flush_address ensures these
djm@6458 2014 // addresses never get flushed. More work needed if this
djm@6458 2015 // ever happens.
djm@6458 2016 //printf("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
djm@6458 2017 if (logps > PAGE_SHIFT) vhpt_multiple_insert(vaddr,pte,logps);
djm@6458 2018 else vhpt_insert(vaddr,pte,logps<<2);
djm@6458 2019 }
djm@6458 2020 // even if domain pagesize is larger than PAGE_SIZE, just put
djm@6458 2021 // PAGE_SIZE mapping in the vhpt for now, else purging is complicated
djm@6458 2022 else vhpt_insert(vaddr,pte,PAGE_SHIFT<<2);
djm@6458 2023 #endif
djm@7727 2024 if ((mp_pte == -1UL) || (IorD & 0x4)) // don't place in 1-entry TLB
djm@7727 2025 return;
djm@6458 2026 if (IorD & 0x1) {
awilliam@9860 2027 vcpu_set_tr_entry(&PSCBX(vcpu,itlb),mp_pte,ps<<2,vaddr);
djm@6458 2028 }
djm@6458 2029 if (IorD & 0x2) {
awilliam@9860 2030 vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),mp_pte,ps<<2,vaddr);
djm@6458 2031 }
djm@6458 2032 }
djm@6458 2033
djm@6458 2034 IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
djm@6458 2035 {
awilliam@8906 2036 unsigned long pteval, logps = itir_ps(itir);
djm@7727 2037 BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
awilliam@10423 2038 struct p2m_entry entry;
djm@6458 2039
awilliam@10436 2040 if (logps < PAGE_SHIFT)
awilliam@10436 2041 panic_domain (NULL, "vcpu_itc_d: domain trying to use "
awilliam@10436 2042 "smaller page size!\n");
awilliam@10436 2043
awilliam@10423 2044 again:
djm@6458 2045 //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
awilliam@10423 2046 pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry);
djm@6458 2047 if (!pteval) return IA64_ILLOP_FAULT;
djm@7727 2048 if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
djm@6458 2049 vcpu_itc_no_srlz(vcpu,2,ifa,pteval,pte,logps);
djm@7727 2050 if (swap_rr0) set_metaphysical_rr0();
awilliam@10423 2051 if (p2m_entry_retry(&entry)) {
awilliam@10688 2052 vcpu_flush_tlb_vhpt_range(ifa, logps);
awilliam@10423 2053 goto again;
awilliam@10423 2054 }
djm@6458 2055 return IA64_NO_FAULT;
djm@6458 2056 }
djm@6458 2057
djm@6458 2058 IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
djm@6458 2059 {
awilliam@8906 2060 unsigned long pteval, logps = itir_ps(itir);
djm@7727 2061 BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
awilliam@10423 2062 struct p2m_entry entry;
djm@6458 2063
awilliam@10436 2064 if (logps < PAGE_SHIFT)
awilliam@10436 2065 panic_domain (NULL, "vcpu_itc_i: domain trying to use "
awilliam@10436 2066 "smaller page size!\n");
awilliam@10423 2067 again:
djm@6458 2068 //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
awilliam@10423 2069 pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry);
djm@6458 2070 if (!pteval) return IA64_ILLOP_FAULT;
djm@7727 2071 if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
djm@6458 2072 vcpu_itc_no_srlz(vcpu, 1,ifa,pteval,pte,logps);
djm@7727 2073 if (swap_rr0) set_metaphysical_rr0();
awilliam@10423 2074 if (p2m_entry_retry(&entry)) {
awilliam@10688 2075 vcpu_flush_tlb_vhpt_range(ifa, logps);
awilliam@10423 2076 goto again;
awilliam@10423 2077 }
djm@6458 2078 return IA64_NO_FAULT;
djm@6458 2079 }
djm@6458 2080
awilliam@10147 2081 IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 log_range)
djm@6458 2082 {
awilliam@10262 2083 BUG_ON(vcpu != current);
awilliam@10262 2084
awilliam@10436 2085 check_xen_space_overlap ("ptc_l", vadr, 1UL << log_range);
awilliam@10436 2086
awilliam@10147 2087 /* Purge TC */
awilliam@10147 2088 vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
awilliam@10147 2089 vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
awilliam@10147 2090
awilliam@10246 2091 /* Purge all tlb and vhpt */
awilliam@10147 2092 vcpu_flush_tlb_vhpt_range (vadr, log_range);
awilliam@10147 2093
awilliam@10147 2094 return IA64_NO_FAULT;
djm@6458 2095 }
awilliam@10246 2096
djm@6458 2097 // At privlvl=0, fc performs no access rights or protection key checks, while
djm@6458 2098 // at privlvl!=0, fc performs access rights checks as if it were a 1-byte
djm@6458 2099 // read but no protection key check. Thus in order to avoid an unexpected
djm@6458 2100 // access rights fault, we have to translate the virtual address to a
djm@6458 2101 // physical address (possibly via a metaphysical address) and do the fc
djm@6458 2102 // on the physical address, which is guaranteed to flush the same cache line
djm@6458 2103 IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
djm@6458 2104 {
djm@6458 2105 // TODO: Only allowed for current vcpu
djm@6458 2106 UINT64 mpaddr, paddr;
djm@6458 2107 IA64FAULT fault;
djm@6458 2108
awilliam@10423 2109 again:
djm@6458 2110 fault = vcpu_tpa(vcpu, vadr, &mpaddr);
djm@6458 2111 if (fault == IA64_NO_FAULT) {
awilliam@10423 2112 struct p2m_entry entry;
awilliam@10423 2113 paddr = translate_domain_mpaddr(mpaddr, &entry);
djm@6458 2114 ia64_fc(__va(paddr));
awilliam@10423 2115 if (p2m_entry_retry(&entry))
awilliam@10423 2116 goto again;
djm@6458 2117 }
djm@6458 2118 return fault;
djm@6458 2119 }
djm@6458 2120
djm@6458 2121 IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
djm@6458 2122 {
djm@6458 2123 // Note that this only needs to be called once, i.e. the
djm@6458 2124 // architected loop to purge the entire TLB should use
djm@6458 2125 // base = stride0 = stride1 = 0, count0 = count1 = 1
djm@6458 2126
awilliam@10572 2127 vcpu_flush_vtlb_all(current);
awilliam@9860 2128
djm@6458 2129 return IA64_NO_FAULT;
djm@6458 2130 }
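/* Sketch of the architected purge loop a guest runs with those
 * parameters (variable names illustrative); with count0 = count1 = 1
 * and zero strides the body executes exactly once:
 *
 *	addr = base;
 *	for (i = 0; i < count0; i++) {
 *		for (j = 0; j < count1; j++) {
 *			ptc_e(addr);	// the purge instruction
 *			addr += stride1;
 *		}
 *		addr += stride0;
 *	}
 */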
djm@6458 2131
djm@6458 2132 IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
djm@6458 2133 {
djm@6458 2134 printk("vcpu_ptc_g: called, not implemented yet\n");
djm@6458 2135 return IA64_ILLOP_FAULT;
djm@6458 2136 }
djm@6458 2137
djm@6458 2138 IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
djm@6458 2139 {
djm@6458 2140 // FIXME: validate not flushing Xen addresses
djm@6458 2141 // if (Xen address) return(IA64_ILLOP_FAULT);
djm@6458 2142 // FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
djm@6458 2143 //printf("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
awilliam@9408 2144
awilliam@10436 2145 check_xen_space_overlap ("ptc_ga", vadr, addr_range);
awilliam@10436 2146
awilliam@10013 2147 domain_flush_vtlb_range (vcpu->domain, vadr, addr_range);
awilliam@9504 2148
djm@6458 2149 return IA64_NO_FAULT;
djm@6458 2150 }
djm@6458 2151
awilliam@10246 2152 IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 log_range)
djm@6458 2153 {
awilliam@10246 2154 unsigned long region = vadr >> 61;
awilliam@10246 2155 u64 addr_range = 1UL << log_range;
awilliam@10246 2156 unsigned long rid, rr;
awilliam@10246 2157 int i;
awilliam@10246 2158 TR_ENTRY *trp;
awilliam@10436 2159
awilliam@10262 2160 BUG_ON(vcpu != current);
awilliam@10436 2161 check_xen_space_overlap ("ptr_d", vadr, 1UL << log_range);
awilliam@10246 2162
awilliam@10246 2163 rr = PSCB(vcpu,rrs)[region];
awilliam@10246 2164 rid = rr & RR_RID_MASK;
awilliam@10246 2165
awilliam@10246 2166 /* Purge TC */
awilliam@10246 2167 vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
awilliam@10246 2168
awilliam@10246 2169 /* Purge tr and recompute dtr_regions. */
awilliam@10246 2170 vcpu->arch.dtr_regions = 0;
awilliam@10246 2171 for (trp = vcpu->arch.dtrs, i = NDTRS; i; i--, trp++)
awilliam@10246 2172 if (vcpu_match_tr_entry_range (trp,rid, vadr, vadr+addr_range))
awilliam@10246 2173 vcpu_purge_tr_entry(trp);
awilliam@10246 2174 else if (trp->pte.p)
awilliam@10246 2175 vcpu_quick_region_set(vcpu->arch.dtr_regions,
awilliam@10246 2176 trp->vadr);
awilliam@10246 2177
awilliam@10246 2178 vcpu_flush_tlb_vhpt_range (vadr, log_range);
awilliam@10246 2179
awilliam@10246 2180 return IA64_NO_FAULT;
djm@6458 2181 }
djm@6458 2182
awilliam@10246 2183 IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 log_range)
djm@6458 2184 {
awilliam@10246 2185 unsigned long region = vadr >> 61;
awilliam@10246 2186 u64 addr_range = 1UL << log_range;
awilliam@10246 2187 unsigned long rid, rr;
awilliam@10246 2188 int i;
awilliam@10246 2189 TR_ENTRY *trp;
awilliam@10436 2190
awilliam@10262 2191 BUG_ON(vcpu != current);
awilliam@10436 2192 check_xen_space_overlap ("ptr_i", vadr, 1UL << log_range);
awilliam@10246 2193
awilliam@10246 2194 rr = PSCB(vcpu,rrs)[region];
awilliam@10246 2195 rid = rr & RR_RID_MASK;
awilliam@10246 2196
awilliam@10246 2197 /* Purge TC */
awilliam@10246 2198 vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
awilliam@10246 2199
awilliam@10246 2200 /* Purge tr and recompute itr_regions. */
awilliam@10246 2201 vcpu->arch.itr_regions = 0;
awilliam@10246 2202 for (trp = vcpu->arch.itrs, i = NITRS; i; i--, trp++)
awilliam@10246 2203 if (vcpu_match_tr_entry_range (trp,rid, vadr, vadr+addr_range))
awilliam@10246 2204 vcpu_purge_tr_entry(trp);
awilliam@10246 2205 else if (trp->pte.p)
awilliam@10246 2206 vcpu_quick_region_set(vcpu->arch.itr_regions,
awilliam@10246 2207 trp->vadr);
awilliam@10246 2208
awilliam@10246 2209 vcpu_flush_tlb_vhpt_range (vadr, log_range);
awilliam@10246 2210
awilliam@10246 2211 return IA64_NO_FAULT;
djm@6458 2212 }