ia64/xen-unstable

changeset 18362:6607624285b2

[IA64] EFI mapping: restoring mapping correctly.

When switching back from the EFI mapping, correctly switch back
depending on the current vcpu type.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Aug 25 19:04:37 2008 +0900 (2008-08-25)
parents 9112c53b70cd
children 1d565c0429cb
files xen/arch/ia64/vmx/vmx_phy_mode.c xen/arch/ia64/vmx/vmx_vcpu.c xen/arch/ia64/xen/regionreg.c xen/include/asm-ia64/linux-xen/linux/efi.h xen/include/asm-ia64/regionreg.h xen/include/asm-ia64/vmx_vcpu.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Mon Aug 25 19:04:37 2008 +0900
     1.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Mon Aug 25 19:04:37 2008 +0900
     1.3 @@ -169,9 +169,7 @@ vmx_load_all_rr(VCPU *vcpu)
     1.4  	ia64_dv_serialize_data();
     1.5  	ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
     1.6  	ia64_dv_serialize_data();
     1.7 -	vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
     1.8 -                      (void *)vcpu->arch.vhpt.hash,
     1.9 -		       vcpu->arch.privregs);
    1.10 +	vmx_switch_rr7_vcpu(vcpu, vrrtomrr(vcpu, VMX(vcpu, vrr[VRN7])));
    1.11  	ia64_set_pta(VMX(vcpu, mpta));
    1.12  	vmx_ia64_set_dcr(vcpu);
    1.13  
     2.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Mon Aug 25 19:04:37 2008 +0900
     2.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Mon Aug 25 19:04:37 2008 +0900
     2.3 @@ -196,13 +196,17 @@ void vmx_vcpu_set_rr_fast(VCPU *vcpu, u6
     2.4      }
     2.5  }
     2.6  
     2.7 -void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
     2.8 -                    void *shared_arch_info)
     2.9 +void __vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid)
    2.10  {
    2.11 -    __get_cpu_var(inserted_vhpt) = (unsigned long)guest_vhpt;
    2.12 -    __get_cpu_var(inserted_vpd) = (unsigned long)shared_arch_info;
    2.13 -    __get_cpu_var(inserted_mapped_regs) = (unsigned long)shared_arch_info;
    2.14 -    __vmx_switch_rr7(rid, guest_vhpt, shared_arch_info);
    2.15 +    __vmx_switch_rr7(rid, (void *)v->arch.vhpt.hash, v->arch.privregs);
    2.16 +}
    2.17 +
    2.18 +void vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid)
    2.19 +{
    2.20 +    __get_cpu_var(inserted_vhpt) = (unsigned long)v->arch.vhpt.hash;
    2.21 +    __get_cpu_var(inserted_vpd) = (unsigned long)v->arch.privregs;
    2.22 +    __get_cpu_var(inserted_mapped_regs) = (unsigned long)v->arch.privregs;
    2.23 +    __vmx_switch_rr7_vcpu(v, rid);
    2.24  }
    2.25  
    2.26  IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
    2.27 @@ -218,8 +222,7 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u6
    2.28      switch((u64)(reg>>VRN_SHIFT)) {
    2.29      case VRN7:
    2.30          if (likely(vcpu == current))
    2.31 -            vmx_switch_rr7(vrrtomrr(vcpu,val), (void *)vcpu->arch.vhpt.hash,
    2.32 -                           vcpu->arch.privregs);
    2.33 +            vmx_switch_rr7_vcpu(vcpu, vrrtomrr(vcpu, val));
    2.34         break;
    2.35      case VRN4:
    2.36          rrval = vrrtomrr(vcpu,val);
     3.1 --- a/xen/arch/ia64/xen/regionreg.c	Mon Aug 25 19:04:37 2008 +0900
     3.2 +++ b/xen/arch/ia64/xen/regionreg.c	Mon Aug 25 19:04:37 2008 +0900
     3.3 @@ -18,9 +18,12 @@
     3.4  #include <asm/vcpu.h>
     3.5  #include <asm/percpu.h>
     3.6  #include <asm/pal.h>
     3.7 +#include <asm/vmx_vcpu.h>
     3.8  
     3.9  /* Defined in xemasm.S  */
    3.10 -extern void ia64_new_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, unsigned long shared_info_va, unsigned long va_vhpt);
    3.11 +extern void ia64_new_rr7(unsigned long rid, void *shared_info,
    3.12 +			 void *shared_arch_info, unsigned long shared_info_va,
    3.13 +			 unsigned long va_vhpt);
    3.14  extern void ia64_new_rr7_efi(unsigned long rid, unsigned long repin_percpu,
    3.15  			     unsigned long vpd);
    3.16  
    3.17 @@ -239,6 +242,14 @@ set_rr(unsigned long rr, unsigned long r
    3.18  	ia64_srlz_d();
    3.19  }
    3.20  
    3.21 +static inline void
    3.22 +ia64_new_rr7_vcpu(struct vcpu *v, unsigned long rid)
    3.23 +{
    3.24 +	ia64_new_rr7(rid, v->domain->shared_info,
    3.25 +		     v->arch.privregs, v->domain->arch.shared_info_va,
    3.26 +		     __va_ul(vcpu_vhpt_maddr(v)));
    3.27 +}
    3.28 +
    3.29  // validates and changes a single region register
    3.30  // in the currently executing domain
    3.31  // Passing a value of -1 is a (successful) no-op
    3.32 @@ -282,9 +293,7 @@ int set_one_rr(unsigned long rr, unsigne
    3.33  		__get_cpu_var(inserted_mapped_regs) =
    3.34  					v->domain->arch.shared_info_va +
    3.35  					XMAPPEDREGS_OFS;
    3.36 -		ia64_new_rr7(vmMangleRID(newrrv.rrval),v->domain->shared_info,
    3.37 -			     v->arch.privregs, v->domain->arch.shared_info_va,
    3.38 -		             __va_ul(vcpu_vhpt_maddr(v)));
    3.39 +		ia64_new_rr7_vcpu(v, vmMangleRID(newrrv.rrval));
    3.40  	} else {
    3.41  		set_rr(rr,newrrv.rrval);
    3.42  	}
    3.43 @@ -312,6 +321,31 @@ int set_one_rr_efi(unsigned long rr, uns
    3.44  	return 1;
    3.45  }
    3.46  
    3.47 +void
    3.48 +set_one_rr_efi_restore(unsigned long rr, unsigned long val)
    3.49 +{
    3.50 +	unsigned long rreg = REGION_NUMBER(rr);
    3.51 +	
    3.52 +	BUG_ON(rreg != 6 && rreg != 7);
    3.53 +
    3.54 +	if (rreg == 6) {
    3.55 +		ia64_set_rr(rr, val);
    3.56 +		ia64_srlz_d();
    3.57 +	} else {
     3.58 +		/* firmware call is done very early before struct vcpu
     3.59 +		   and struct domain are initialized. */
    3.60 +		if (unlikely(current == NULL || current->domain == NULL ||
    3.61 +			     is_idle_vcpu(current)))
    3.62 +			ia64_new_rr7_efi(val, cpu_isset(smp_processor_id(),
    3.63 +							percpu_set),
    3.64 +					 0UL);
    3.65 +		else if (VMX_DOMAIN(current))
    3.66 +			__vmx_switch_rr7_vcpu(current, val);
    3.67 +		else
    3.68 +			ia64_new_rr7_vcpu(current, val);
    3.69 +	}
    3.70 +}
    3.71 +
    3.72  void set_virtual_rr0(void)
    3.73  {
    3.74  	struct vcpu *v = current;
     4.1 --- a/xen/include/asm-ia64/linux-xen/linux/efi.h	Mon Aug 25 19:04:37 2008 +0900
     4.2 +++ b/xen/include/asm-ia64/linux-xen/linux/efi.h	Mon Aug 25 19:04:37 2008 +0900
     4.3 @@ -487,8 +487,8 @@ struct efi_generic_dev_path {
     4.4  #define XEN_EFI_RR_LEAVE(rr6, rr7) do {			\
     4.5  	if (rr7 != XEN_EFI_RR) {			\
     4.6  		efi_unmap_pal_code();			\
     4.7 -		set_one_rr_efi(6UL << 61, rr6);		\
     4.8 -		set_one_rr_efi(7UL << 61, rr7);		\
     4.9 +		set_one_rr_efi_restore(6UL << 61, rr6);	\
    4.10 +		set_one_rr_efi_restore(7UL << 61, rr7);	\
    4.11  	}						\
    4.12  } while (0)
    4.13  
     5.1 --- a/xen/include/asm-ia64/regionreg.h	Mon Aug 25 19:04:37 2008 +0900
     5.2 +++ b/xen/include/asm-ia64/regionreg.h	Mon Aug 25 19:04:37 2008 +0900
     5.3 @@ -46,6 +46,7 @@ extern cpumask_t percpu_set;
     5.4  
     5.5  int set_one_rr(unsigned long rr, unsigned long val);
     5.6  int set_one_rr_efi(unsigned long rr, unsigned long val);
     5.7 +void set_one_rr_efi_restore(unsigned long rr, unsigned long val);
     5.8  
     5.9  // This function is purely for performance... apparently scrambling
    5.10  //  bits in the region id makes for better hashing, which means better
     6.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Mon Aug 25 19:04:37 2008 +0900
     6.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Mon Aug 25 19:04:37 2008 +0900
     6.3 @@ -105,8 +105,8 @@ extern int vmx_vcpu_pend_interrupt(VCPU 
     6.4  extern void vcpu_load_kernel_regs(VCPU * vcpu);
     6.5  extern void __vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
     6.6                               void *shared_arch_info);
     6.7 -extern void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
     6.8 -                           void *shared_arch_info);
     6.9 +extern void __vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid);
    6.10 +extern void vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid);
    6.11  extern void vmx_ia64_set_dcr(VCPU * v);
    6.12  extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
    6.13  extern void vmx_asm_bsw0(void);