direct-io.hg

changeset 11940:53ec7e3d3a8a

[IA64] Fix a VTi physical mode bug

When the guest writes a region register (rr) while in physical mode, and it
is rr0 or rr4, Xen must not write the value into the machine rr; it is only
saved, and the machine rr is loaded on the switch back to virtual mode.

Signed-off-by: Xuefei Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Sun Oct 01 11:19:45 2006 -0600 (2006-10-01)
parents a947ca5d4731
children 914c44d10c8d
files xen/arch/ia64/vmx/vmx_phy_mode.c xen/arch/ia64/vmx/vmx_vcpu.c
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Sun Oct 01 11:14:00 2006 -0600
     1.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Sun Oct 01 11:19:45 2006 -0600
     1.3 @@ -126,10 +126,16 @@ void
     1.4  vmx_init_all_rr(VCPU *vcpu)
     1.5  {
     1.6  	VMX(vcpu, vrr[VRN0]) = 0x38;
     1.7 +	// enable vhpt in guest physical mode
     1.8 +	vcpu->arch.metaphysical_rr0 |= 1;
     1.9 +	vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38);
    1.10  	VMX(vcpu, vrr[VRN1]) = 0x38;
    1.11  	VMX(vcpu, vrr[VRN2]) = 0x38;
    1.12  	VMX(vcpu, vrr[VRN3]) = 0x38;
    1.13  	VMX(vcpu, vrr[VRN4]) = 0x38;
    1.14 +	// enable vhpt in guest physical mode
    1.15 +	vcpu->arch.metaphysical_rr4 |= 1;
    1.16 +	vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38);
    1.17  	VMX(vcpu, vrr[VRN5]) = 0x38;
    1.18  	VMX(vcpu, vrr[VRN6]) = 0x38;
    1.19  	VMX(vcpu, vrr[VRN7]) = 0x738;
    1.20 @@ -141,11 +147,9 @@ void
    1.21  vmx_load_all_rr(VCPU *vcpu)
    1.22  {
    1.23  	unsigned long psr;
    1.24 -	ia64_rr phy_rr;
    1.25  
    1.26  	local_irq_save(psr);
    1.27  
    1.28 -
    1.29  	/* WARNING: not allow co-exist of both virtual mode and physical
    1.30  	 * mode in same region
    1.31  	 */
    1.32 @@ -154,24 +158,16 @@ vmx_load_all_rr(VCPU *vcpu)
    1.33  			panic_domain(vcpu_regs(vcpu),
    1.34  			             "Unexpected domain switch in phy emul\n");
    1.35  		}
    1.36 -		phy_rr.rrval = vcpu->arch.metaphysical_rr0;
    1.37 -		//phy_rr.ps = PAGE_SHIFT;
    1.38 -		phy_rr.ve = 1;
    1.39 -
    1.40 -		ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval);
    1.41 +		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
    1.42  		ia64_dv_serialize_data();
    1.43 -		phy_rr.rrval = vcpu->arch.metaphysical_rr4;
    1.44 -		//phy_rr.ps = PAGE_SHIFT;
    1.45 -		phy_rr.ve = 1;
    1.46 -
    1.47 -		ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval);
    1.48 +		ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
    1.49  		ia64_dv_serialize_data();
    1.50  	} else {
    1.51  		ia64_set_rr((VRN0 << VRN_SHIFT),
    1.52 -			     vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0])));
    1.53 +                            vcpu->arch.metaphysical_saved_rr0);
    1.54  		ia64_dv_serialize_data();
    1.55  		ia64_set_rr((VRN4 << VRN_SHIFT),
    1.56 -			     vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4])));
    1.57 +                            vcpu->arch.metaphysical_saved_rr4);
    1.58  		ia64_dv_serialize_data();
    1.59  	}
    1.60  
    1.61 @@ -209,21 +205,11 @@ void
    1.62  switch_to_physical_rid(VCPU *vcpu)
    1.63  {
    1.64      UINT64 psr;
    1.65 -    ia64_rr phy_rr, mrr;
    1.66 -
    1.67      /* Save original virtual mode rr[0] and rr[4] */
    1.68      psr=ia64_clear_ic();
    1.69 -    phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
    1.70 -    mrr.rrval = ia64_get_rr(VRN0 << VRN_SHIFT);
    1.71 -    phy_rr.ps = mrr.ps;
    1.72 -    phy_rr.ve = 1;
    1.73 -    ia64_set_rr(VRN0<<VRN_SHIFT, phy_rr.rrval);
    1.74 +    ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
    1.75      ia64_srlz_d();
    1.76 -    phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
    1.77 -    mrr.rrval = ia64_get_rr(VRN4 << VRN_SHIFT);
    1.78 -    phy_rr.ps = mrr.ps;
    1.79 -    phy_rr.ve = 1;
    1.80 -    ia64_set_rr(VRN4<<VRN_SHIFT, phy_rr.rrval);
    1.81 +    ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
    1.82      ia64_srlz_d();
    1.83  
    1.84      ia64_set_psr(psr);
    1.85 @@ -236,15 +222,10 @@ void
    1.86  switch_to_virtual_rid(VCPU *vcpu)
    1.87  {
    1.88      UINT64 psr;
    1.89 -    ia64_rr mrr;
    1.90 -
    1.91      psr=ia64_clear_ic();
    1.92 -
    1.93 -    vcpu_get_rr(vcpu,VRN0<<VRN_SHIFT,&mrr.rrval);
    1.94 -    ia64_set_rr(VRN0<<VRN_SHIFT, vrrtomrr(vcpu, mrr.rrval));
    1.95 +    ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
    1.96      ia64_srlz_d();
    1.97 -    vcpu_get_rr(vcpu,VRN4<<VRN_SHIFT,&mrr.rrval);
    1.98 -    ia64_set_rr(VRN4<<VRN_SHIFT, vrrtomrr(vcpu, mrr.rrval));
    1.99 +    ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
   1.100      ia64_srlz_d();
   1.101      ia64_set_psr(psr);
   1.102      ia64_srlz_i();
     2.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Sun Oct 01 11:14:00 2006 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Sun Oct 01 11:19:45 2006 -0600
     2.3 @@ -212,19 +212,32 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UI
     2.4  {
     2.5      ia64_rr oldrr,newrr;
     2.6      extern void * pal_vaddr;
     2.7 +    u64 rrval;
     2.8  
     2.9      vcpu_get_rr(vcpu, reg, &oldrr.rrval);
    2.10      newrr.rrval=val;
    2.11      if (newrr.rid >= (1 << vcpu->domain->arch.rid_bits))
    2.12          panic_domain (NULL, "use of invalid rid %x\n", newrr.rid);
    2.13  
    2.14 -    VMX(vcpu,vrr[reg>>61]) = val;
    2.15 -    switch((u64)(reg>>61)) {
    2.16 +    VMX(vcpu,vrr[reg>>VRN_SHIFT]) = val;
    2.17 +    switch((u64)(reg>>VRN_SHIFT)) {
    2.18      case VRN7:
    2.19          vmx_switch_rr7(vrrtomrr(vcpu,val),vcpu->domain->shared_info,
    2.20          (void *)vcpu->arch.privregs,
    2.21          (void *)vcpu->arch.vhpt.hash, pal_vaddr );
    2.22         break;
    2.23 +    case VRN4:
    2.24 +        rrval = vrrtomrr(vcpu,val);
    2.25 +        vcpu->arch.metaphysical_saved_rr4 = rrval;
    2.26 +        if (!is_physical_mode(vcpu))
    2.27 +            ia64_set_rr(reg,rrval);
    2.28 +        break;
    2.29 +    case VRN0:
    2.30 +        rrval = vrrtomrr(vcpu,val);
    2.31 +        vcpu->arch.metaphysical_saved_rr0 = rrval;
    2.32 +        if (!is_physical_mode(vcpu))
    2.33 +            ia64_set_rr(reg,rrval);
    2.34 +        break;
    2.35      default:
    2.36          ia64_set_rr(reg,vrrtomrr(vcpu,val));
    2.37          break;