ia64/xen-unstable

changeset 18094:7da7b53b2139

[IA64] kexec: Only map PAL when making EFI, PAL or SAL calls

Move PAL code from the Xen identity mapped region to the
EFI identity mapped region, which overlaps with guest virtual space.

Make sure that PAL memory is only pinned into the TLB when making
EFI, PAL or SAL calls.

This seems to be nice as it provides a symmetrical approach to
mapping and unmapping PAL code.

However it would be just as safe, and arguably simpler just to map
the PAL code (once?) when the EFI RR is active - for instance very
early on in boot, or when calling XEN_EFI_RR_ENTER. In other words,
unpinning in XEN_EFI_RR_LEAVE shouldn't be necessary, as the EFI RR
should protect the memory from unwanted accesses by guests (or
the hypervisor for that matter).

This patch is mostly the work of Yamahata-san.

Cc: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Simon Horman <horms@verge.net.au>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Tue Jul 22 12:15:02 2008 +0900 (2008-07-22)
parents 54060aec0dc1
children 2fd648307ad1
files xen/arch/ia64/linux-xen/efi.c xen/arch/ia64/linux-xen/mca_asm.S xen/arch/ia64/linux-xen/smpboot.c xen/arch/ia64/vmx/vmx_entry.S xen/arch/ia64/vmx/vmx_phy_mode.c xen/arch/ia64/vmx/vmx_vcpu.c xen/arch/ia64/xen/xenasm.S xen/include/asm-ia64/linux-xen/linux/efi.h xen/include/asm-ia64/vmx_vcpu.h
line diff
     1.1 --- a/xen/arch/ia64/linux-xen/efi.c	Tue Jul 22 12:15:02 2008 +0900
     1.2 +++ b/xen/arch/ia64/linux-xen/efi.c	Tue Jul 22 12:15:02 2008 +0900
     1.3 @@ -424,7 +424,7 @@ efi_get_pal_addr (void)
     1.4  			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
     1.5  			vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
     1.6  #endif
     1.7 -		return __va(md->phys_addr);
     1.8 +		return __va_efi(md->phys_addr);
     1.9  	}
    1.10  	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
    1.11  	       __FUNCTION__);
    1.12 @@ -432,7 +432,7 @@ efi_get_pal_addr (void)
    1.13  }
    1.14  
    1.15  #ifdef XEN
    1.16 -void *pal_vaddr = 0;
    1.17 +static void *pal_vaddr = 0;
    1.18  
    1.19  void *
    1.20  efi_get_pal_addr(void)
    1.21 @@ -443,24 +443,51 @@ efi_get_pal_addr(void)
    1.22  }
    1.23  #endif
    1.24  
    1.25 -void
    1.26 -efi_map_pal_code (void)
    1.27 +#ifdef XEN
    1.28 +static void
    1.29 +__efi_unmap_pal_code (void *pal_vaddr)
    1.30  {
    1.31 -#ifdef XEN
    1.32 -	u64 psr;
    1.33 -	(void)efi_get_pal_addr();
    1.34 -#else
    1.35 +	ia64_ptr(0x1, GRANULEROUNDDOWN((unsigned long)pal_vaddr),
    1.36 +		 IA64_GRANULE_SHIFT);
    1.37 +}
    1.38 +
    1.39 +void
    1.40 +efi_unmap_pal_code (void)
    1.41 +{
    1.42  	void *pal_vaddr = efi_get_pal_addr ();
    1.43  	u64 psr;
    1.44  
    1.45  	if (!pal_vaddr)
    1.46  		return;
    1.47 -#endif
    1.48  
    1.49  	/*
    1.50  	 * Cannot write to CRx with PSR.ic=1
    1.51  	 */
    1.52  	psr = ia64_clear_ic();
    1.53 +	__efi_unmap_pal_code(pal_vaddr);
    1.54 +	ia64_set_psr(psr);		/* restore psr */
    1.55 +	ia64_srlz_i();
    1.56 +}
    1.57 +#endif
    1.58 +
    1.59 +void
    1.60 +efi_map_pal_code (void)
    1.61 +{
    1.62 +	void *pal_vaddr = efi_get_pal_addr ();
    1.63 +	u64 psr;
    1.64 +
    1.65 +	if (!pal_vaddr)
    1.66 +		return;
    1.67 +
    1.68 +	/*
    1.69 +	 * Cannot write to CRx with PSR.ic=1
    1.70 +	 */
    1.71 +	psr = ia64_clear_ic();
    1.72 +#ifdef XEN
    1.73 +	/* pal_vaddr must be unpinned before pinning
    1.74 +	 * This is needed in the case of a nested EFI, PAL or SAL call */
    1.75 +	__efi_unmap_pal_code(pal_vaddr);
    1.76 +#endif
    1.77  	ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
    1.78  		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
    1.79  		 IA64_GRANULE_SHIFT);
    1.80 @@ -594,7 +621,9 @@ efi_init (void)
    1.81  	}
    1.82  #endif
    1.83  
    1.84 +#ifndef XEN
    1.85  	efi_map_pal_code();
    1.86 +#endif
    1.87  	efi_enter_virtual_mode();
    1.88  }
    1.89  
     2.1 --- a/xen/arch/ia64/linux-xen/mca_asm.S	Tue Jul 22 12:15:02 2008 +0900
     2.2 +++ b/xen/arch/ia64/linux-xen/mca_asm.S	Tue Jul 22 12:15:02 2008 +0900
     2.3 @@ -473,6 +473,7 @@ ia64_reload_tr:
     2.4  	;;
     2.5  	srlz.d
     2.6  	;;
     2.7 +#ifndef XEN
     2.8  	// 3. Reload ITR for PAL code.
     2.9  	GET_THIS_PADDR(r2, ia64_mca_pal_pte)
    2.10  	;;
    2.11 @@ -491,6 +492,8 @@ ia64_reload_tr:
    2.12  	;;
    2.13  	srlz.i
    2.14  	;;
    2.15 +#endif
    2.16 +
    2.17  	// 4. Reload DTR for stack.
    2.18  #ifdef XEN
    2.19  	// Kernel registers are saved in a per_cpu cpu_kr_ia64_t
     3.1 --- a/xen/arch/ia64/linux-xen/smpboot.c	Tue Jul 22 12:15:02 2008 +0900
     3.2 +++ b/xen/arch/ia64/linux-xen/smpboot.c	Tue Jul 22 12:15:02 2008 +0900
     3.3 @@ -438,7 +438,9 @@ start_secondary (void *unused)
     3.4  	/* Early console may use I/O ports */
     3.5  	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
     3.6  	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
     3.7 +#ifndef XEN
     3.8  	efi_map_pal_code();
     3.9 +#endif
    3.10  	cpu_init();
    3.11  	smp_callin();
    3.12  
     4.1 --- a/xen/arch/ia64/vmx/vmx_entry.S	Tue Jul 22 12:15:02 2008 +0900
     4.2 +++ b/xen/arch/ia64/vmx/vmx_entry.S	Tue Jul 22 12:15:02 2008 +0900
     4.3 @@ -598,7 +598,7 @@ END(ia64_leave_hypercall)
     4.4  /*
     4.5   * in0: new rr7
     4.6   * in1: virtual address of guest_vhpt
     4.7 - * in2: virtual address of pal code segment
     4.8 + * in2: virtual addres of guest shared_info
     4.9   * r8: will contain old rid value
    4.10   */
    4.11  
    4.12 @@ -611,7 +611,7 @@ END(ia64_leave_hypercall)
    4.13  GLOBAL_ENTRY(__vmx_switch_rr7)
    4.14         // not sure this unwind statement is correct...
    4.15         .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
    4.16 -	alloc loc1 = ar.pfs, 4, 8, 0, 0
    4.17 +	alloc loc1 = ar.pfs, 4, 7, 0, 0
    4.18  1:{
    4.19  	mov r28  = in0                  // copy procedure index
    4.20  	mov r8   = ip                   // save ip to compute branch
    4.21 @@ -623,13 +623,10 @@ 1:{
    4.22  	tpa loc2 = loc2                 // get physical address of per cpu date
    4.23  	tpa r3 = r8                     // get physical address of ip
    4.24  	dep loc5 = 0,in1,60,4           // get physical address of guest_vhpt
    4.25 -	dep loc6 = 0,in2,60,4           // get physical address of pal code
    4.26 -	dep loc7 = 0,in3,60,4           // get physical address of privregs
    4.27 +	dep loc6 = 0,in2,60,4           // get physical address of privregs
    4.28  	;;
    4.29  	dep loc6 = 0,loc6,0,IA64_GRANULE_SHIFT
    4.30                                          // mask granule shift
    4.31 -	dep loc7 = 0,loc7,0,IA64_GRANULE_SHIFT
    4.32 -                                        // mask granule shift
    4.33  	mov loc4 = psr                  // save psr
    4.34  	;;
    4.35  	mov loc3 = ar.rsc               // save RSE configuration
    4.36 @@ -725,46 +722,31 @@ 1:
    4.37  	;;
    4.38  .vhpt_overlaps:
    4.39  
    4.40 -	// re-pin mappings for PAL code section
    4.41 -	mov r24=IA64_TR_PALCODE
    4.42 -	or loc6 = r25,loc6              // construct PA | page properties
    4.43 -	mov r23 = IA64_GRANULE_SHIFT<<2
    4.44 -	;;
    4.45 -	ptr.i   in2,r23
    4.46 -	;;
    4.47 -	mov cr.itir=r23
    4.48 -	mov cr.ifa=in2
    4.49 -	;;
    4.50 -	itr.i itr[r24]=loc6             // wire in new mapping...
    4.51 -	;;
    4.52 -
    4.53  	// r16, r19, r20 are used by
    4.54  	//  ia64_switch_mode_phys()/ia64_switch_mode_virt()
    4.55  	// re-pin mappings for privregs
    4.56  	// r21  = (current physical addr) & (IA64_GRANULE_SIZE - 1)
    4.57  	// r17  = (guest_vhpt physical addr) & (IA64_GRANULE_SIZE - 1)
    4.58 -	// loc6 = (((pal phys addr) & (IA64_GRANULE_SIZE - 1) << 2)) | PAGE_KERNEL
    4.59 -	// loc7 = (privregs physical addr) & (IA64_GRANULE_SIZE - 1)
    4.60 -	cmp.ne.unc p7,p0=r21,loc7	// check overlap with current stack
    4.61 +	// loc6 = (privregs physical addr) & (IA64_GRANULE_SIZE - 1)
    4.62 +	cmp.ne.unc p7,p0=r21,loc6	// check overlap with current stack
    4.63  	;;
    4.64 -(p7)	cmp.ne.unc p8,p0=r17,loc7	// check overlap with guest_vhpt
    4.65 +(p7)	cmp.ne.unc p8,p0=r17,loc6	// check overlap with guest_vhpt
    4.66  	;;
    4.67 -	// loc7 = (((privregs phys) & (IA64_GRANULE_SIZE - 1)) << 2) | PAGE_KERNEL
    4.68 -	or loc7 = r25,loc7          // construct PA | page properties
    4.69 +	// loc6 = (((privregs phys) & (IA64_GRANULE_SIZE - 1)) << 2) | PAGE_KERNEL
    4.70 +	or loc6 = r25,loc6          // construct PA | page properties
    4.71  	;;
    4.72 -	cmp.ne p9,p0=loc6,loc7
    4.73  	mov r22=IA64_TR_VPD
    4.74  	mov r24=IA64_TR_MAPPED_REGS
    4.75  	mov r23=IA64_GRANULE_SHIFT<<2
    4.76  	;;
    4.77 -(p9)	ptr.i   in3,r23	
    4.78 -(p8)	ptr.d   in3,r23
    4.79 +	ptr.i   in2,r23
    4.80 +(p8)	ptr.d   in2,r23
    4.81  	mov cr.itir=r23
    4.82 -	mov cr.ifa=in3
    4.83 +	mov cr.ifa=in2
    4.84  	;;
    4.85 -(p9)	itr.i itr[r22]=loc7         // wire in new mapping...
    4.86 +	itr.i itr[r22]=loc6         // wire in new mapping...
    4.87  	;;
    4.88 -(p8)	itr.d dtr[r24]=loc7         // wire in new mapping...
    4.89 +(p8)	itr.d dtr[r24]=loc6         // wire in new mapping...
    4.90  	;;
    4.91  
    4.92  	// done, switch back to virtual and return
     5.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Tue Jul 22 12:15:02 2008 +0900
     5.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Tue Jul 22 12:15:02 2008 +0900
     5.3 @@ -133,8 +133,6 @@ vmx_init_all_rr(VCPU *vcpu)
     5.4  	VMX(vcpu, vrr[VRN7]) = 0x738;
     5.5  }
     5.6  
     5.7 -extern void * pal_vaddr;
     5.8 -
     5.9  void
    5.10  vmx_load_all_rr(VCPU *vcpu)
    5.11  {
    5.12 @@ -173,7 +171,7 @@ vmx_load_all_rr(VCPU *vcpu)
    5.13  	ia64_dv_serialize_data();
    5.14  	vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
    5.15                        (void *)vcpu->arch.vhpt.hash,
    5.16 -		       pal_vaddr, vcpu->arch.privregs);
    5.17 +		       vcpu->arch.privregs);
    5.18  	ia64_set_pta(VMX(vcpu, mpta));
    5.19  	vmx_ia64_set_dcr(vcpu);
    5.20  
     6.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Tue Jul 22 12:15:02 2008 +0900
     6.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Tue Jul 22 12:15:02 2008 +0900
     6.3 @@ -197,12 +197,12 @@ void vmx_vcpu_set_rr_fast(VCPU *vcpu, u6
     6.4  }
     6.5  
     6.6  void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
     6.7 -                    void *pal_vaddr, void *shared_arch_info)
     6.8 +                    void *shared_arch_info)
     6.9  {
    6.10      __get_cpu_var(inserted_vhpt) = (unsigned long)guest_vhpt;
    6.11      __get_cpu_var(inserted_vpd) = (unsigned long)shared_arch_info;
    6.12      __get_cpu_var(inserted_mapped_regs) = (unsigned long)shared_arch_info;
    6.13 -    __vmx_switch_rr7(rid, guest_vhpt, pal_vaddr, shared_arch_info);
    6.14 +    __vmx_switch_rr7(rid, guest_vhpt, shared_arch_info);
    6.15  }
    6.16  
    6.17  IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
    6.18 @@ -219,7 +219,7 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u6
    6.19      case VRN7:
    6.20          if (likely(vcpu == current))
    6.21              vmx_switch_rr7(vrrtomrr(vcpu,val), (void *)vcpu->arch.vhpt.hash,
    6.22 -                           pal_vaddr, vcpu->arch.privregs);
    6.23 +                           vcpu->arch.privregs);
    6.24         break;
    6.25      case VRN4:
    6.26          rrval = vrrtomrr(vcpu,val);
     7.1 --- a/xen/arch/ia64/xen/xenasm.S	Tue Jul 22 12:15:02 2008 +0900
     7.2 +++ b/xen/arch/ia64/xen/xenasm.S	Tue Jul 22 12:15:02 2008 +0900
     7.3 @@ -34,12 +34,13 @@
     7.4  //                         unsigned long va_vhpt)   	 /* in4 */
     7.5  //Local usage:
     7.6  //  loc0=rp, loc1=ar.pfs, loc2=percpu_paddr, loc3=psr, loc4=ar.rse
     7.7 -//  loc5=pal_vaddr, loc6=xen_paddr, loc7=shared_archinfo_paddr,
     7.8 +//  loc5=shared_archinfo_paddr, loc6=xen_paddr,
     7.9  //  r16, r19, r20 are used by ia64_switch_mode_{phys, virt}()
    7.10 +// loc5 is unused.
    7.11  GLOBAL_ENTRY(ia64_new_rr7)
    7.12  	// FIXME? not sure this unwind statement is correct...
    7.13  	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
    7.14 -	alloc loc1 = ar.pfs, 5, 8, 0, 0
    7.15 +	alloc loc1 = ar.pfs, 5, 7, 0, 0
    7.16  	movl loc2=PERCPU_ADDR
    7.17  1:	{
    7.18  	  mov loc3 = psr		// save psr	
    7.19 @@ -51,7 +52,7 @@ 1:	{
    7.20  	tpa in1=in1			// grab shared_info BEFORE changing rr7
    7.21  	adds r8 = 1f-1b,r8		// calculate return address for call
    7.22  	;;
    7.23 -	tpa loc7=in2			// grab arch_vcpu_info BEFORE chg rr7
    7.24 +	tpa loc5=in2			// grab arch_vcpu_info BEFORE chg rr7
    7.25  	movl r17=PSR_BITS_TO_SET
    7.26  	mov loc4=ar.rsc			// save RSE configuration
    7.27  	movl r16=PSR_BITS_TO_CLEAR
    7.28 @@ -60,10 +61,7 @@ 1:	{
    7.29  	mov ar.rsc=0			// put RSE in enforced lazy, LE mode
    7.30  	or loc3=loc3,r17		// add in psr the bits to set
    7.31  	;;
    7.32 -	movl loc5=pal_vaddr		// get pal_vaddr
    7.33 -	;;
    7.34 -	ld8 loc5=[loc5]			// read pal_vaddr
    7.35 -	;;
    7.36 +
    7.37  	andcm r16=loc3,r16		// removes bits to clear from psr
    7.38  	dep loc6=0,r8,0,KERNEL_TR_PAGE_SHIFT // Xen code paddr
    7.39  	br.call.sptk.many rp=ia64_switch_mode_phys
    7.40 @@ -163,25 +161,13 @@ 1:
    7.41  	add r22=r22,in3
    7.42  	;;
    7.43  	ptr.d	r22,r24
    7.44 -	or r23=loc7,r25			// construct PA | page properties
    7.45 +	or r23=loc5,r25			// construct PA | page properties
    7.46  	mov cr.itir=r24
    7.47  	mov cr.ifa=r22
    7.48  	mov r21=IA64_TR_MAPPED_REGS
    7.49  	;;
    7.50  	itr.d dtr[r21]=r23		// wire in new mapping...
    7.51  
    7.52 -	// Purge/insert PAL TR
    7.53 -	mov r24=IA64_TR_PALCODE
    7.54 -	mov r23=IA64_GRANULE_SHIFT<<2
    7.55 -	dep r25=0,loc5,60,4		// convert pal vaddr to paddr
    7.56 -	;;
    7.57 -	ptr.i	loc5,r23
    7.58 -	or r25=r25,r26			// construct PA | page properties
    7.59 -	mov cr.itir=r23
    7.60 -	mov cr.ifa=loc5
    7.61 -	;;
    7.62 -	itr.i itr[r24]=r25
    7.63 -
    7.64  	// done, switch back to virtual and return
    7.65  	mov r16=loc3			// r16= original psr
    7.66  	br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
    7.67 @@ -216,7 +202,7 @@ END(ia64_new_rr7)
    7.68  GLOBAL_ENTRY(ia64_new_rr7_efi)
    7.69  	// FIXME? not sure this unwind statement is correct...
    7.70  	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
    7.71 -	alloc loc1 = ar.pfs, 3, 8, 0, 0
    7.72 +	alloc loc1 = ar.pfs, 3, 7, 0, 0
    7.73  	movl loc2=PERCPU_ADDR
    7.74  1:	{
    7.75  	  mov loc3 = psr		// save psr
    7.76 @@ -235,17 +221,13 @@ 1:	{
    7.77  	mov ar.rsc=0			// put RSE in enforced lazy, LE mode
    7.78  	or loc3=loc3,r17		// add in psr the bits to set
    7.79  	;;
    7.80 -        movl loc5=pal_vaddr             // get pal_vaddr
    7.81 -	;;
    7.82 -	ld8 loc5=[loc5]                 // read pal_vaddr
    7.83 +	dep loc6 = 0,in2,60,4		// get physical address of VPD
    7.84  	;;
    7.85 -	dep loc7 = 0,in2,60,4		// get physical address of VPD
    7.86 -	;;
    7.87 -	dep loc7 = 0,loc7,0,IA64_GRANULE_SHIFT
    7.88 +	dep loc6 = 0,loc6,0,IA64_GRANULE_SHIFT
    7.89  					// mask granule shift
    7.90  	;;
    7.91  	andcm r16=loc3,r16		// removes bits to clear from psr
    7.92 -	dep loc6=0,r8,0,KERNEL_TR_PAGE_SHIFT // Xen code paddr
    7.93 +	dep loc5=0,r8,0,KERNEL_TR_PAGE_SHIFT // Xen code paddr
    7.94  	br.call.sptk.many rp=ia64_switch_mode_phys
    7.95  1:
    7.96  	movl	r26=PAGE_KERNEL
    7.97 @@ -271,7 +253,7 @@ 1:
    7.98  	mov r16=IA64_TR_KERNEL
    7.99  	mov cr.itir=r24
   7.100  	mov cr.ifa=r17
   7.101 -	or r18=loc6,r26
   7.102 +	or r18=loc5,r26
   7.103  	;;
   7.104  	itr.i itr[r16]=r18
   7.105  	;;
   7.106 @@ -324,7 +306,7 @@ ia64_new_rr7_efi_percpu_not_mapped:
   7.107  	// VPD
   7.108  	cmp.eq p7,p0=r0,in2
   7.109  (p7)	br.cond.sptk ia64_new_rr7_efi_vpd_not_mapped
   7.110 -	or loc7 = r26,loc7		// construct PA | page properties
   7.111 +	or loc6 = r26,loc6		// construct PA | page properties
   7.112  	mov r22=IA64_TR_VPD
   7.113  	mov r24=IA64_TR_MAPPED_REGS
   7.114  	mov r23=IA64_GRANULE_SHIFT<<2
   7.115 @@ -340,9 +322,9 @@ ia64_new_rr7_efi_percpu_not_mapped:
   7.116  	mov cr.itir=r23
   7.117  	mov cr.ifa=in2
   7.118  	;;
   7.119 -	itr.i itr[r22]=loc7
   7.120 +	itr.i itr[r22]=loc6
   7.121  	;;
   7.122 -	itr.d dtr[r24]=loc7
   7.123 +	itr.d dtr[r24]=loc6
   7.124  	;;
   7.125  	srlz.i
   7.126  	;;
   7.127 @@ -350,18 +332,6 @@ ia64_new_rr7_efi_percpu_not_mapped:
   7.128  	;;
   7.129  ia64_new_rr7_efi_vpd_not_mapped:
   7.130  
   7.131 -	// Purge/insert PAL TR
   7.132 -	mov r24=IA64_TR_PALCODE
   7.133 -	mov r23=IA64_GRANULE_SHIFT<<2
   7.134 -	dep r25=0,loc5,60,4		// convert pal vaddr to paddr
   7.135 -	;;
   7.136 -	ptr.i	loc5,r23
   7.137 -	or r25=r25,r26			// construct PA | page properties
   7.138 -	mov cr.itir=r23
   7.139 -	mov cr.ifa=loc5
   7.140 -	;;
   7.141 -	itr.i itr[r24]=r25
   7.142 -
   7.143  	// done, switch back to virtual and return
   7.144  	mov r16=loc3			// r16= original psr
   7.145  	br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
     8.1 --- a/xen/include/asm-ia64/linux-xen/linux/efi.h	Tue Jul 22 12:15:02 2008 +0900
     8.2 +++ b/xen/include/asm-ia64/linux-xen/linux/efi.h	Tue Jul 22 12:15:02 2008 +0900
     8.3 @@ -24,10 +24,6 @@
     8.4  #include <asm/page.h>
     8.5  #include <asm/system.h>
     8.6  
     8.7 -#ifdef XEN
     8.8 -extern void * pal_vaddr;
     8.9 -#endif
    8.10 -
    8.11  #define EFI_SUCCESS		0
    8.12  #define EFI_LOAD_ERROR          ( 1 | (1UL << (BITS_PER_LONG-1)))
    8.13  #define EFI_INVALID_PARAMETER	( 2 | (1UL << (BITS_PER_LONG-1)))
    8.14 @@ -302,6 +298,9 @@ efi_guid_unparse(efi_guid_t *guid, char 
    8.15  extern void efi_init (void);
    8.16  extern void *efi_get_pal_addr (void);
    8.17  extern void efi_map_pal_code (void);
    8.18 +#ifdef XEN
    8.19 +extern void efi_unmap_pal_code (void);
    8.20 +#endif
    8.21  extern void efi_map_memmap(void);
    8.22  extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
    8.23  extern void efi_gettimeofday (struct timespec *ts);
    8.24 @@ -466,16 +465,32 @@ struct efi_generic_dev_path {
    8.25   * E: bits N-53: reserved (0)
    8.26   */
    8.27  
    8.28 +/* rr7 (and rr6) may already be set to XEN_EFI_RR, which
    8.29 + * would indicate a nested EFI, SAL or PAL call, such
    8.30 + * as from an MCA. This may have occured during a call
    8.31 + * to set_one_rr_efi(). To be safe, repin everything anyway.
    8.32 + */
    8.33 +
    8.34  #define XEN_EFI_RR_ENTER(rr6, rr7) do {			\
    8.35  	rr6 = ia64_get_rr(6UL << 61);			\
    8.36  	rr7 = ia64_get_rr(7UL << 61);			\
    8.37  	set_one_rr_efi(6UL << 61, XEN_EFI_RR);		\
    8.38  	set_one_rr_efi(7UL << 61, XEN_EFI_RR);		\
    8.39 +	efi_map_pal_code();				\
    8.40  } while (0)
    8.41  
    8.42 +/* There is no need to do anything if the saved rr7 (and rr6)
    8.43 + * is XEN_EFI_RR, as it would just switch them from XEN_EFI_RR to XEN_EFI_RR
    8.44 + * Furthermore, if this is a nested call it is important not
    8.45 + * to unpin efi_unmap_pal_code() until the outermost call is finished
    8.46 + */
    8.47 +
    8.48  #define XEN_EFI_RR_LEAVE(rr6, rr7) do {			\
    8.49 -	set_one_rr_efi(6UL << 61, rr6);			\
    8.50 -	set_one_rr_efi(7UL << 61, rr7);			\
    8.51 +	if (rr7 != XEN_EFI_RR) {			\
    8.52 +		efi_unmap_pal_code();			\
    8.53 +		set_one_rr_efi(6UL << 61, rr6);		\
    8.54 +		set_one_rr_efi(7UL << 61, rr7);		\
    8.55 +	}						\
    8.56  } while (0)
    8.57  
    8.58  #else
     9.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Tue Jul 22 12:15:02 2008 +0900
     9.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Tue Jul 22 12:15:02 2008 +0900
     9.3 @@ -104,9 +104,9 @@ extern uint64_t guest_read_vivr(VCPU * v
     9.4  extern int vmx_vcpu_pend_interrupt(VCPU * vcpu, uint8_t vector);
     9.5  extern void vcpu_load_kernel_regs(VCPU * vcpu);
     9.6  extern void __vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
     9.7 -                             void *pal_vaddr, void *shared_arch_info);
     9.8 +                             void *shared_arch_info);
     9.9  extern void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
    9.10 -                           void *pal_vaddr, void *shared_arch_info);
    9.11 +                           void *shared_arch_info);
    9.12  extern void vmx_ia64_set_dcr(VCPU * v);
    9.13  extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
    9.14  extern void vmx_asm_bsw0(void);