ia64/xen-unstable
changeset 16332:166bf3b04495
[IA64] vti save-restore: cleanup of PV region register handling.
Fix rr handling to avoid reserved register/field faults in Xen.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author		Alex Williamson <alex.williamson@hp.com>
date		Wed Nov 07 10:07:06 2007 -0700 (2007-11-07)
parents		7ac9bfbc24e2
children	828cb584c1cc
files		xen/arch/ia64/vmx/vmx_utility.c xen/arch/ia64/vmx/vmx_vcpu.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/regionreg.c xen/arch/ia64/xen/vcpu.c xen/include/asm-ia64/regionreg.h
line diff
--- a/xen/arch/ia64/vmx/vmx_utility.c	Wed Nov 07 09:47:40 2007 -0700
+++ b/xen/arch/ia64/vmx/vmx_utility.c	Wed Nov 07 10:07:06 2007 -0700
@@ -637,10 +637,9 @@ int is_reserved_itir_field(VCPU* vcpu, u
     return 0;
 }
 
-int is_reserved_rr_field(VCPU* vcpu, u64 reg_value)
+static int __is_reserved_rr_field(u64 reg_value)
 {
-    ia64_rr rr;
-    rr.rrval = reg_value;
+    ia64_rr rr = { .rrval = reg_value };
 
     if(rr.reserved0 != 0 || rr.reserved1 != 0){
         return 1;
@@ -656,3 +655,20 @@ int is_reserved_rr_field(VCPU* vcpu, u64
     return 0;
 }
 
+int is_reserved_rr_rid(VCPU* vcpu, u64 reg_value)
+{
+    ia64_rr rr = { .rrval = reg_value };
+
+    if (rr.rid >= (1UL << vcpu->domain->arch.rid_bits))
+        return 1;
+
+    return 0;
+}
+
+int is_reserved_rr_field(VCPU* vcpu, u64 reg_value)
+{
+    if (__is_reserved_rr_field(reg_value))
+        return 1;
+
+    return is_reserved_rr_rid(vcpu, reg_value);
+}
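For readers outside the ia64 tree: the checks above operate on the architected region register layout (ve in bit 0, ps in bits 2..7, rid in bits 8..31, everything else reserved). Below is a minimal userspace model of the two validators, not the hypervisor code itself; the ia64_rr union follows the public ia64 definition, and rid_bits stands in for vcpu->domain->arch.rid_bits.

#include <stdio.h>

/* Architected region register layout, as in the public ia64 headers:
 * bit 0 = ve, bit 1 = reserved, bits 2..7 = ps, bits 8..31 = rid,
 * bits 32..63 = reserved. */
typedef union {
    struct {
        unsigned long ve        :  1;  /* enable HW walker */
        unsigned long reserved0 :  1;
        unsigned long ps        :  6;  /* log2 page size */
        unsigned long rid       : 24;  /* region id */
        unsigned long reserved1 : 32;
    };
    unsigned long rrval;
} ia64_rr;

/* Model of __is_reserved_rr_field(): any reserved bit set is invalid. */
static int is_reserved_field(unsigned long reg_value)
{
    ia64_rr rr = { .rrval = reg_value };
    return rr.reserved0 != 0 || rr.reserved1 != 0;
}

/* Model of is_reserved_rr_rid(): the rid must fit in the domain's rid
 * space; rid_bits stands in for vcpu->domain->arch.rid_bits. */
static int is_reserved_rid(unsigned long reg_value, unsigned int rid_bits)
{
    ia64_rr rr = { .rrval = reg_value };
    return rr.rid >= (1UL << rid_bits);
}

int main(void)
{
    unsigned int rid_bits = 18;           /* example domain rid width */
    unsigned long ok  = (14UL << 2) | 1;  /* ps = 14, ve = 1, rid = 0 */
    unsigned long bad = 1UL << 32;        /* a reserved1 bit set */

    printf("ok:  field=%d rid=%d\n", is_reserved_field(ok),
           is_reserved_rid(ok, rid_bits));   /* 0 0 */
    printf("bad: field=%d rid=%d\n", is_reserved_field(bad),
           is_reserved_rid(bad, rid_bits));  /* 1 0 */
    return 0;
}

The split is visible in the diffs that follow: vmx_vcpu_set_rr() rejects only an out-of-range rid, while set_one_rr() and vcpu_set_rr() validate the full value via is_reserved_rr_field().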
--- a/xen/arch/ia64/vmx/vmx_vcpu.c	Wed Nov 07 09:47:40 2007 -0700
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Wed Nov 07 10:07:06 2007 -0700
@@ -161,12 +161,12 @@ IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
 
 IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
 {
-    ia64_rr newrr;
     u64 rrval;
 
-    newrr.rrval=val;
-    if (newrr.rid >= (1 << vcpu->domain->arch.rid_bits))
-        panic_domain (NULL, "use of invalid rid %x\n", newrr.rid);
+    if (unlikely(is_reserved_rr_rid(vcpu, val))) {
+        gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
+        return IA64_RSVDREG_FAULT;
+    }
 
     VMX(vcpu,vrr[reg>>VRN_SHIFT]) = val;
     switch((u64)(reg>>VRN_SHIFT)) {
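The substantive change in this hunk is the error contract: an out-of-range rid used to take down the whole domain through panic_domain(), whereas the setter now returns IA64_RSVDREG_FAULT so the guest sees a fault while the domain keeps running. A compact sketch of the new shape, with every name modeled rather than taken from the Xen headers:

#include <stdio.h>

/* Modeled fault codes; the real values come from Xen's ia64 headers. */
typedef enum { IA64_NO_FAULT, IA64_RSVDREG_FAULT } IA64FAULT;

/* Stand-in for is_reserved_rr_rid(); always reports "valid" here. */
static int is_reserved_rr_rid_model(unsigned long val)
{
    (void)val;
    return 0;
}

/* New shape of vmx_vcpu_set_rr(): validate first, report a fault code
 * instead of panicking, and only then commit the value. */
static IA64FAULT set_rr_model(unsigned long val)
{
    if (is_reserved_rr_rid_model(val))
        return IA64_RSVDREG_FAULT;  /* guest faults, domain survives */
    /* ... commit val to the shadow vrr and the hardware rr ... */
    return IA64_NO_FAULT;
}

int main(void)
{
    printf("fault = %d\n", set_rr_model(0x39));  /* 0: IA64_NO_FAULT */
    return 0;
}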
--- a/xen/arch/ia64/xen/domain.c	Wed Nov 07 09:47:40 2007 -0700
+++ b/xen/arch/ia64/xen/domain.c	Wed Nov 07 10:07:06 2007 -0700
@@ -1627,6 +1627,7 @@ domain_set_shared_info_va (unsigned long
 {
 	struct vcpu *v = current;
 	struct domain *d = v->domain;
+	int rc;
 
 	/* Check virtual address:
 	   must belong to region 7,
@@ -1648,9 +1649,10 @@ domain_set_shared_info_va (unsigned long
 	__ia64_per_cpu_var(current_psr_ic_addr) = (int *)(va + XSI_PSR_IC_OFS);
 
 	/* Remap the shared pages. */
-	set_one_rr (7UL << 61, PSCB(v,rrs[7]));
+	rc = !set_one_rr(7UL << 61, PSCB(v,rrs[7]));
+	BUG_ON(rc);
 
-	return 0;
+	return rc;
 }
 
 /* Transfer and clear the shadow bitmap in 1kB chunks for L1 cache. */
--- a/xen/arch/ia64/xen/regionreg.c	Wed Nov 07 09:47:40 2007 -0700
+++ b/xen/arch/ia64/xen/regionreg.c	Wed Nov 07 10:07:06 2007 -0700
@@ -238,14 +238,12 @@ int set_one_rr(unsigned long rr, unsigne
 	ia64_rr rrv, newrrv, memrrv;
 	unsigned long newrid;
 
-	if (val == -1)
-		return 1;
-
 	rrv.rrval = val;
 	newrrv.rrval = 0;
 	newrid = v->arch.starting_rid + rrv.rid;
 
-	if (newrid > v->arch.ending_rid) {
+	// avoid reserved register/field fault
+	if (unlikely(is_reserved_rr_field(v, val))) {
 		printk("can't set rr%d to %lx, starting_rid=%x,"
 			"ending_rid=%x, val=%lx\n", (int) rreg, newrid,
 			v->arch.starting_rid,v->arch.ending_rid,val);
@@ -295,12 +293,11 @@ void init_all_rr(struct vcpu *v)
 	ia64_rr rrv;
 
 	rrv.rrval = 0;
-	//rrv.rrval = v->domain->arch.metaphysical_rr0;
 	rrv.ps = v->arch.vhpt_pg_shift;
 	rrv.ve = 1;
 	if (!v->vcpu_info)
 		panic("Stopping in init_all_rr\n");
-	VCPU(v,rrs[0]) = -1;
+	VCPU(v,rrs[0]) = rrv.rrval;
 	VCPU(v,rrs[1]) = rrv.rrval;
 	VCPU(v,rrs[2]) = rrv.rrval;
 	VCPU(v,rrs[3]) = rrv.rrval;
@@ -308,7 +305,7 @@ void init_all_rr(struct vcpu *v)
 	VCPU(v,rrs[5]) = rrv.rrval;
 	rrv.ve = 0;
 	VCPU(v,rrs[6]) = rrv.rrval;
-//	v->shared_info->arch.rrs[7] = rrv.rrval;
+	VCPU(v,rrs[7]) = rrv.rrval;
 }
 
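init_all_rr() now seeds rrs[0] (and the previously commented-out rrs[7]) with a real value instead of the -1 sentinel that set_one_rr() used to special-case. As a worked example of what that default encodes, under the layout shown earlier and with vhpt_pg_shift = 14 chosen purely for illustration:

#include <stdio.h>

int main(void)
{
    /* ve = bit 0, ps = bits 2..7, rid = bits 8..31 (see layout above).
     * vhpt_pg_shift = 14 (16KB pages) is an assumed example value. */
    unsigned long vhpt_pg_shift = 14;
    unsigned long rr0_to_rr5 = (vhpt_pg_shift << 2) | 1UL;  /* ve = 1 */
    unsigned long rr6_rr7    = (vhpt_pg_shift << 2);        /* ve = 0 */

    printf("rrs[0..5] = 0x%lx\n", rr0_to_rr5);  /* 0x39 */
    printf("rrs[6..7] = 0x%lx\n", rr6_rr7);     /* 0x38 */
    return 0;
}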
--- a/xen/arch/ia64/xen/vcpu.c	Wed Nov 07 09:47:40 2007 -0700
+++ b/xen/arch/ia64/xen/vcpu.c	Wed Nov 07 10:07:06 2007 -0700
@@ -287,7 +287,7 @@ static void vcpu_set_metaphysical_mode(V
 		PSCB(vcpu, metaphysical_mode) = newmode;
 		if (newmode)
 			set_metaphysical_rr0();
-		else if (PSCB(vcpu, rrs[0]) != -1)
+		else
 			set_virtual_rr0();
 	}
 }
@@ -2095,9 +2095,16 @@ unsigned long vcpu_get_rr_ve(VCPU * vcpu
 
 IA64FAULT vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val)
 {
+	if (unlikely(is_reserved_rr_field(vcpu, val))) {
+		gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
+		return IA64_RSVDREG_FAULT;
+	}
+
 	PSCB(vcpu, rrs)[reg >> 61] = val;
-	if (vcpu == current)
-		set_one_rr(reg, val);
+	if (likely(vcpu == current)) {
+		int rc = set_one_rr(reg, val);
+		BUG_ON(rc == 0);
+	}
 	return IA64_NO_FAULT;
 }
 
@@ -2120,17 +2127,30 @@ IA64FAULT vcpu_set_rr0_to_rr4(VCPU * vcp
 	u64 reg3 = 0x6000000000000000UL;
 	u64 reg4 = 0x8000000000000000UL;
 
+	if (unlikely(is_reserved_rr_field(vcpu, val0) ||
+		     is_reserved_rr_field(vcpu, val1) ||
+		     is_reserved_rr_field(vcpu, val2) ||
+		     is_reserved_rr_field(vcpu, val3) ||
+		     is_reserved_rr_field(vcpu, val4))) {
+		gdprintk(XENLOG_DEBUG,
+			 "use of invalid rrval %lx %lx %lx %lx %lx\n",
+			 val0, val1, val2, val3, val4);
+		return IA64_RSVDREG_FAULT;
+	}
+
 	PSCB(vcpu, rrs)[reg0 >> 61] = val0;
 	PSCB(vcpu, rrs)[reg1 >> 61] = val1;
 	PSCB(vcpu, rrs)[reg2 >> 61] = val2;
 	PSCB(vcpu, rrs)[reg3 >> 61] = val3;
 	PSCB(vcpu, rrs)[reg4 >> 61] = val4;
-	if (vcpu == current) {
-		set_one_rr(reg0, val0);
-		set_one_rr(reg1, val1);
-		set_one_rr(reg2, val2);
-		set_one_rr(reg3, val3);
-		set_one_rr(reg4, val4);
+	if (likely(vcpu == current)) {
+		int rc;
+		rc = !set_one_rr(reg0, val0);
+		rc |= !set_one_rr(reg1, val1);
+		rc |= !set_one_rr(reg2, val2);
+		rc |= !set_one_rr(reg3, val3);
+		rc |= !set_one_rr(reg4, val4);
+		BUG_ON(rc != 0);
 	}
 	return IA64_NO_FAULT;
 }
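Both this hunk and the domain.c one depend on set_one_rr() returning 1 on success and 0 on failure; the callers negate the result to get a 0-is-success code before accumulating it and asserting. A small model of the pattern, where set_one_rr_model() is a stand-in and assert() plays the role of BUG_ON():

#include <assert.h>
#include <stdio.h>

/* Stand-in for set_one_rr(): returns 1 on success, 0 on failure,
 * so success must be negated to get a 0-is-good error code. */
static int set_one_rr_model(int region, unsigned long val)
{
    (void)region;
    (void)val;
    return 1;  /* pretend the write always succeeds */
}

int main(void)
{
    unsigned long vals[5] = { 0x39, 0x39, 0x39, 0x39, 0x39 };
    int rc = 0;

    /* Accumulate failures across rr0..rr4, as vcpu_set_rr0_to_rr4 does;
     * the values were validated beforehand, so a failure here is a bug. */
    for (int r = 0; r < 5; r++)
        rc |= !set_one_rr_model(r, vals[r]);

    assert(rc == 0);  /* plays the role of BUG_ON(rc != 0) */
    printf("all region registers set\n");
    return 0;
}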
--- a/xen/include/asm-ia64/regionreg.h	Wed Nov 07 09:47:40 2007 -0700
+++ b/xen/include/asm-ia64/regionreg.h	Wed Nov 07 10:07:06 2007 -0700
@@ -1,3 +1,4 @@
+
 #ifndef _REGIONREG_H_
 #define _REGIONREG_H_
 
@@ -85,6 +86,9 @@ extern void set_metaphysical_rr0(void);
 
 extern void load_region_regs(struct vcpu *v);
 
+extern int is_reserved_rr_rid(struct vcpu *vcpu, u64 reg_value);
+extern int is_reserved_rr_field(struct vcpu *vcpu, u64 reg_value);
+
 #endif /* !_REGIONREG_H_ */
 
 /*