ia64/xen-unstable

changeset 16781:6f7e6608cb74

[IA64] domheap: Allocate privregs from domain heap for VTi domain

- Pin privregs down with both dtr/itr so that privregs can be allocated
from the domain heap
- Introduce vmx_vpd_pin()/vmx_vpd_unpin().
The vpd area is pinned down while its vcpu is current, but two functions,
update_vhpi() and alloc_vpd(), are exceptions:
they must pin down the area themselves before making a PAL call.
- Slightly rework the context switch so that it does not use an unpinned vpd area.
  vmx_load_state() needs the vpd area pinned down,
  so call it after vmx_load_all_rr().
- Fix vmx_load_all_rr()
  vmx_switch_rr7() itself sets psr.ic = 0, so clearing psr.ic before
  calling vmx_switch_rr7() is unnecessary.
- Improve vmx_switch_rr7()
  It used to set psr.ic = 0 only after switching to physical mode, but this
  can be done at switching time.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Thu Jan 17 12:05:43 2008 -0700 (2008-01-17)
parents 80626da7f6e3
children 4f1f9ee50133
files xen/arch/ia64/vmx/vlsapic.c xen/arch/ia64/vmx/vmx_entry.S xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/vmx/vmx_phy_mode.c xen/arch/ia64/vmx/vmx_vcpu.c xen/arch/ia64/xen/domain.c xen/include/asm-ia64/vmx_vcpu.h xen/include/asm-ia64/vmx_vpd.h xen/include/asm-ia64/xenkregs.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/vlsapic.c	Thu Jan 17 12:05:43 2008 -0700
     1.2 +++ b/xen/arch/ia64/vmx/vlsapic.c	Thu Jan 17 12:05:43 2008 -0700
     1.3 @@ -36,6 +36,7 @@
     1.4  #include <asm/gcc_intrin.h>
     1.5  #include <asm/vmx_mm_def.h>
     1.6  #include <asm/vmx.h>
     1.7 +#include <asm/vmx_vpd.h>
     1.8  #include <asm/hw_irq.h>
     1.9  #include <asm/vmx_pal_vsa.h>
    1.10  #include <asm/kregs.h>
    1.11 @@ -91,9 +92,12 @@ static void update_vhpi(VCPU *vcpu, int 
    1.12  
    1.13      VCPU(vcpu,vhpi) = vhpi;
    1.14      // TODO: Add support for XENO
    1.15 -    if (VCPU(vcpu,vac).a_int)
    1.16 +    if (VCPU(vcpu,vac).a_int) {
    1.17 +        vmx_vpd_pin(vcpu);
    1.18          ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT, 
    1.19                        (uint64_t)vcpu->arch.privregs, 0, 0, 0, 0, 0, 0);
    1.20 +        vmx_vpd_unpin(vcpu);
    1.21 +    }
    1.22  }
    1.23  
    1.24  
     2.1 --- a/xen/arch/ia64/vmx/vmx_entry.S	Thu Jan 17 12:05:43 2008 -0700
     2.2 +++ b/xen/arch/ia64/vmx/vmx_entry.S	Thu Jan 17 12:05:43 2008 -0700
     2.3 @@ -623,14 +623,14 @@ END(ia64_leave_hypercall)
     2.4  #define PSR_BITS_TO_CLEAR                                           \
     2.5  	(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB |     \
     2.6  	 IA64_PSR_RT | IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI |    \
     2.7 -	 IA64_PSR_ED | IA64_PSR_DFL | IA64_PSR_DFH)
     2.8 +	 IA64_PSR_ED | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_IC)
     2.9  #define PSR_BITS_TO_SET    IA64_PSR_BN
    2.10  
    2.11 -//extern void vmx_switch_rr7(unsigned long rid, void *guest_vhpt, void * pal_vaddr );
    2.12 +//extern void vmx_switch_rr7(unsigned long rid, void *guest_vhpt, void * pal_vaddr, void * shared_arch_info );
    2.13  GLOBAL_ENTRY(vmx_switch_rr7)
    2.14         // not sure this unwind statement is correct...
    2.15         .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
    2.16 -       alloc loc1 = ar.pfs, 3, 7, 0, 0
    2.17 +	alloc loc1 = ar.pfs, 4, 8, 0, 0
    2.18  1:{
    2.19  	mov r28  = in0                  // copy procedure index
    2.20  	mov r8   = ip                   // save ip to compute branch
    2.21 @@ -643,7 +643,12 @@ 1:{
    2.22  	tpa r3 = r8                     // get physical address of ip
    2.23  	dep loc5 = 0,in1,60,4           // get physical address of guest_vhpt
    2.24  	dep loc6 = 0,in2,60,4           // get physical address of pal code
    2.25 +	dep loc7 = 0,in3,60,4           // get physical address of privregs
    2.26  	;;
    2.27 +	dep loc6 = 0,loc6,0,IA64_GRANULE_SHIFT
    2.28 +                                        // mask granule shift
    2.29 +	dep loc7 = 0,loc7,0,IA64_GRANULE_SHIFT
    2.30 +                                        // mask granule shift
    2.31  	mov loc4 = psr                  // save psr
    2.32  	;;
    2.33  	mov loc3 = ar.rsc               // save RSE configuration
    2.34 @@ -661,11 +666,9 @@ 1:
    2.35  	dep r16=-1,r0,61,3
    2.36  	;;
    2.37  	mov rr[r16]=in0
    2.38 +	;;
    2.39  	srlz.d
    2.40  	;;
    2.41 -	rsm 0x6000
    2.42 -	;;
    2.43 -	srlz.d
    2.44  
    2.45  	// re-pin mappings for kernel text and data
    2.46  	mov r18=KERNEL_TR_PAGE_SHIFT<<2
    2.47 @@ -679,6 +682,7 @@ 1:
    2.48  	mov r16=IA64_TR_KERNEL
    2.49  	movl r25 = PAGE_KERNEL
    2.50  	// r2=KERNEL_TR_PAGE_SHIFT truncated physicall address of ip
    2.51 +	//   = ia64_tpa(ip) & (KERNEL_TR_PAGE_SIZE - 1)
    2.52  	dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
    2.53  	;;
    2.54  	or r24=r2,r25
    2.55 @@ -737,7 +741,9 @@ 1:
    2.56  	// re-pin mappings for guest_vhpt
    2.57  	// unless overlaps with IA64_TR_XEN_HEAP_REGS or IA64_TR_CURRENT_STACK
    2.58  	dep r18=0,loc5,0,KERNEL_TR_PAGE_SHIFT
    2.59 +	// r21 = (current physical addr) & (IA64_GRANULE_SIZE - 1)
    2.60  	dep r21=0,r21,0,IA64_GRANULE_SHIFT 
    2.61 +	// r17 = (guest_vhpt physical addr) & (IA64_GRANULE_SIZE - 1)
    2.62  	dep r17=0,loc5,0,IA64_GRANULE_SHIFT 
    2.63  	;;
    2.64  	cmp.eq p6,p0=r18,r2             // check overlap with xen heap
    2.65 @@ -771,6 +777,43 @@ 1:
    2.66  	itr.i itr[r24]=loc6             // wire in new mapping...
    2.67  	;;
    2.68  
    2.69 +	// r16, r19, r20 are used by
    2.70 +	//  ia64_switch_mode_phys()/ia64_switch_mode_virt()
    2.71 +	// re-pin mappings for privregs
    2.72 +	// r2   = ia64_tpa(ip) & (KERNEL_TR_PAGE_SIZE - 1)
    2.73 +	// r21  = (current physical addr) & (IA64_GRANULE_SIZE - 1)
    2.74 +	// r17  = (guest_vhpt physical addr) & (IA64_GRANULE_SIZE - 1)
    2.75 +
    2.76 +	// r24  = (privregs physical addr) & (KERNEL_TR_PAGE_SIZE - 1)
    2.77 +	// loc6 = (((pal phys addr) & (IA64_GRANULE_SIZE - 1) << 2)) | PAGE_KERNEL
    2.78 +	// loc7 = (privregs physical addr) & (IA64_GRANULE_SIZE - 1)
    2.79 +	dep r24 = 0,loc7,0,KERNEL_TR_PAGE_SHIFT
    2.80 +	;;
    2.81 +	cmp.ne p6,p0=r24,r2		// check overlap with xen heap
    2.82 +	;; 
    2.83 +(p6)	cmp.ne.unc p7,p0=r21,loc7	// check overlap with current stack
    2.84 +	;;
    2.85 +(p7)	cmp.ne.unc p8,p0=r17,loc7	// check overlap with guest_vhpt
    2.86 +	;;
    2.87 +	// loc7 = (((privregs phys) & (IA64_GRANULE_SIZE - 1)) << 2) | PAGE_KERNEL
    2.88 +	or loc7 = r25,loc7          // construct PA | page properties
    2.89 +	;;
    2.90 +	cmp.ne p9,p0=loc6,loc7
    2.91 +	mov r22=IA64_TR_VPD
    2.92 +	mov r24=IA64_TR_MAPPED_REGS
    2.93 +	mov r23=IA64_GRANULE_SHIFT<<2
    2.94 +	;;
    2.95 +(p9)	ptr.i   in3,r23	
    2.96 +(p8)	ptr.d   in3,r23
    2.97 +	mov cr.itir=r23
    2.98 +	mov cr.ifa=in3
    2.99 +	;;
   2.100 +(p9)	itr.i itr[r22]=loc7         // wire in new mapping...
   2.101 +	;;
   2.102 +(p8)	itr.d dtr[r24]=loc7         // wire in new mapping...
   2.103 +	;;
   2.104 +.privregs_overlaps:
   2.105 +
   2.106  	// done, switch back to virtual and return
   2.107  	mov r16=loc4                    // r16= original psr
   2.108  	br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
     3.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Thu Jan 17 12:05:43 2008 -0700
     3.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Thu Jan 17 12:05:43 2008 -0700
     3.3 @@ -51,6 +51,7 @@
     3.4  #include <asm/viosapic.h>
     3.5  #include <xen/event.h>
     3.6  #include <asm/vlsapic.h>
     3.7 +#include <asm/vhpt.h>
     3.8  #include "entry.h"
     3.9  
    3.10  /* Global flag to identify whether Intel vmx feature is on */
    3.11 @@ -150,20 +151,21 @@ typedef union {
    3.12  	};
    3.13  } cpuid3_t;
    3.14  
    3.15 -/* Allocate vpd from xenheap */
    3.16 +/* Allocate vpd from domheap */
    3.17  static vpd_t *alloc_vpd(void)
    3.18  {
    3.19  	int i;
    3.20  	cpuid3_t cpuid3;
    3.21 +	struct page_info *page;
    3.22  	vpd_t *vpd;
    3.23  	mapped_regs_t *mregs;
    3.24  
    3.25 -	vpd = alloc_xenheap_pages(get_order(VPD_SIZE));
    3.26 -	if (!vpd) {
    3.27 +	page = alloc_domheap_pages(NULL, get_order(VPD_SIZE), 0);
    3.28 +	if (page == NULL) {
    3.29  		printk("VPD allocation failed.\n");
    3.30  		return NULL;
    3.31  	}
    3.32 -	vpd = (vpd_t *)virt_to_xenva(vpd);
    3.33 +	vpd = page_to_virt(page);
    3.34  
    3.35  	printk(XENLOG_DEBUG "vpd base: 0x%p, vpd size:%ld\n",
    3.36  	       vpd, sizeof(vpd_t));
    3.37 @@ -191,12 +193,79 @@ static vpd_t *alloc_vpd(void)
    3.38  	return vpd;
    3.39  }
    3.40  
    3.41 -/* Free vpd to xenheap */
    3.42 +/* Free vpd to domheap */
    3.43  static void
    3.44  free_vpd(struct vcpu *v)
    3.45  {
    3.46  	if ( v->arch.privregs )
    3.47 -		free_xenheap_pages(v->arch.privregs, get_order(VPD_SIZE));
    3.48 +		free_domheap_pages(virt_to_page(v->arch.privregs),
    3.49 +				   get_order(VPD_SIZE));
    3.50 +}
    3.51 +
    3.52 +// This is used for PAL_VP_CREATE and PAL_VPS_SET_PENDING_INTERRUPT
    3.53 +// so that we don't have to pin the vpd down with itr[].
    3.54 +void
    3.55 +__vmx_vpd_pin(struct vcpu* v)
    3.56 +{
    3.57 +	unsigned long privregs = (unsigned long)v->arch.privregs;
    3.58 +	u64 psr;
    3.59 +	
    3.60 +	// check overlapping with xenheap
    3.61 +	if ((privregs &
    3.62 +	     ~(KERNEL_TR_PAGE_SIZE - 1)) ==
    3.63 +	    ((unsigned long)__va(ia64_tpa(current_text_addr())) &
    3.64 +	     ~(KERNEL_TR_PAGE_SIZE - 1)))
    3.65 +		return;
    3.66 +		
    3.67 +	privregs &= ~(IA64_GRANULE_SIZE - 1);
    3.68 +
    3.69 +	// check overlapping with current stack
    3.70 +	if (privregs ==
    3.71 +	    ((unsigned long)current & ~(IA64_GRANULE_SIZE - 1)))
    3.72 +		return;
    3.73 +
    3.74 +	if (!VMX_DOMAIN(current)) {
    3.75 +		// check overlapping with vhpt
    3.76 +		if (privregs ==
    3.77 +		    (vcpu_vhpt_maddr(current) & ~(IA64_GRANULE_SHIFT - 1)))
    3.78 +			return;
    3.79 +	} else {
    3.80 +		// check overlapping with vhpt
    3.81 +		if (privregs ==
    3.82 +		    ((unsigned long)current->arch.vhpt.hash &
    3.83 +		     ~(IA64_GRANULE_SHIFT - 1)))
    3.84 +			return;
    3.85 +
    3.86 +		// check overlapping with privregs
    3.87 +		if (privregs ==
    3.88 +		    ((unsigned long)current->arch.privregs &
    3.89 +		     ~(IA64_GRANULE_SHIFT - 1)))
    3.90 +			return;
    3.91 +	}
    3.92 +
    3.93 +	psr = ia64_clear_ic();
    3.94 +	ia64_ptr(0x2 /*D*/, privregs, IA64_GRANULE_SIZE);
    3.95 +	ia64_srlz_d();
    3.96 +	ia64_itr(0x2 /*D*/, IA64_TR_MAPPED_REGS, privregs,
    3.97 +		 pte_val(pfn_pte(__pa(privregs) >> PAGE_SHIFT, PAGE_KERNEL)),
    3.98 +		 IA64_GRANULE_SHIFT);
    3.99 +	ia64_set_psr(psr);
   3.100 +	ia64_srlz_d();
   3.101 +}
   3.102 +
   3.103 +void
   3.104 +__vmx_vpd_unpin(struct vcpu* v)
   3.105 +{
   3.106 +	if (!VMX_DOMAIN(current)) {
   3.107 +		int rc;
   3.108 +		rc = !set_one_rr(VRN7 << VRN_SHIFT, VCPU(current, rrs[VRN7]));
   3.109 +		BUG_ON(rc);
   3.110 +	} else {
   3.111 +		IA64FAULT fault;
   3.112 +		fault = vmx_vcpu_set_rr(current, VRN7 << VRN_SHIFT,
   3.113 +					VMX(current, vrr[VRN7]));
   3.114 +		BUG_ON(fault != IA64_NO_FAULT);
   3.115 +	}
   3.116  }
   3.117  
   3.118  /*
   3.119 @@ -212,7 +281,11 @@ vmx_create_vp(struct vcpu *v)
   3.120  	/* ia64_ivt is function pointer, so need this tranlation */
   3.121  	ivt_base = (u64) &vmx_ia64_ivt;
   3.122  	printk(XENLOG_DEBUG "ivt_base: 0x%lx\n", ivt_base);
   3.123 +
   3.124 +	vmx_vpd_pin(v);
   3.125  	ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
   3.126 +	vmx_vpd_unpin(v);
   3.127 +	
   3.128  	if (ret != PAL_STATUS_SUCCESS){
   3.129  		panic_domain(vcpu_regs(v),"ia64_pal_vp_create failed. \n");
   3.130  	}
   3.131 @@ -224,6 +297,7 @@ vmx_save_state(struct vcpu *v)
   3.132  {
   3.133  	u64 status;
   3.134  
   3.135 +	BUG_ON(v != current);
   3.136  	/* FIXME: about setting of pal_proc_vector... time consuming */
   3.137  	status = ia64_pal_vp_save((u64 *)v->arch.privregs, 0);
   3.138  	if (status != PAL_STATUS_SUCCESS){
   3.139 @@ -250,6 +324,7 @@ vmx_load_state(struct vcpu *v)
   3.140  {
   3.141  	u64 status;
   3.142  
   3.143 +	BUG_ON(v != current);
   3.144  	status = ia64_pal_vp_restore((u64 *)v->arch.privregs, 0);
   3.145  	if (status != PAL_STATUS_SUCCESS){
   3.146  		panic_domain(vcpu_regs(v),"Restore vp status failed\n");
   3.147 @@ -518,6 +593,7 @@ void vmx_do_resume(struct vcpu *v)
   3.148  	ioreq_t *p;
   3.149  
   3.150  	vmx_load_all_rr(v);
   3.151 +	vmx_load_state(v);
   3.152  	migrate_timer(&v->arch.arch_vmx.vtm.vtm_timer, v->processor);
   3.153  
   3.154  	/* stolen from hvm_do_resume() in arch/x86/hvm/hvm.c */
     4.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Thu Jan 17 12:05:43 2008 -0700
     4.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Thu Jan 17 12:05:43 2008 -0700
     4.3 @@ -138,7 +138,6 @@ extern void * pal_vaddr;
     4.4  void
     4.5  vmx_load_all_rr(VCPU *vcpu)
     4.6  {
     4.7 -	unsigned long psr;
     4.8  	unsigned long rr0, rr4;
     4.9  
    4.10  	switch (vcpu->arch.arch_vmx.mmu_mode) {
    4.11 @@ -158,8 +157,6 @@ vmx_load_all_rr(VCPU *vcpu)
    4.12  		panic_domain(NULL, "bad mmu mode value");
    4.13  	}
    4.14  
    4.15 -	psr = ia64_clear_ic();
    4.16 -
    4.17  	ia64_set_rr((VRN0 << VRN_SHIFT), rr0);
    4.18  	ia64_dv_serialize_data();
    4.19  	ia64_set_rr((VRN4 << VRN_SHIFT), rr4);
    4.20 @@ -175,13 +172,12 @@ vmx_load_all_rr(VCPU *vcpu)
    4.21  	ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
    4.22  	ia64_dv_serialize_data();
    4.23  	vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
    4.24 -		       (void *)vcpu->arch.vhpt.hash, pal_vaddr);
    4.25 +                      (void *)vcpu->arch.vhpt.hash,
    4.26 +		       pal_vaddr, vcpu->arch.privregs);
    4.27  	ia64_set_pta(VMX(vcpu, mpta));
    4.28  	vmx_ia64_set_dcr(vcpu);
    4.29  
    4.30  	ia64_srlz_d();
    4.31 -	ia64_set_psr(psr);
    4.32 -	ia64_srlz_i();
    4.33  }
    4.34  
    4.35  void
     5.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Thu Jan 17 12:05:43 2008 -0700
     5.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Thu Jan 17 12:05:43 2008 -0700
     5.3 @@ -181,8 +181,8 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u6
     5.4      switch((u64)(reg>>VRN_SHIFT)) {
     5.5      case VRN7:
     5.6          if (likely(vcpu == current))
     5.7 -            vmx_switch_rr7(vrrtomrr(vcpu,val),
     5.8 -                           (void *)vcpu->arch.vhpt.hash, pal_vaddr );
     5.9 +            vmx_switch_rr7(vrrtomrr(vcpu,val), (void *)vcpu->arch.vhpt.hash,
    5.10 +                           pal_vaddr, vcpu->arch.privregs);
    5.11         break;
    5.12      case VRN4:
    5.13          rrval = vrrtomrr(vcpu,val);
     6.1 --- a/xen/arch/ia64/xen/domain.c	Thu Jan 17 12:05:43 2008 -0700
     6.2 +++ b/xen/arch/ia64/xen/domain.c	Thu Jan 17 12:05:43 2008 -0700
     6.3 @@ -241,8 +241,6 @@ void context_switch(struct vcpu *prev, s
     6.4              ia64_setreg(_IA64_REG_CR_DCR, dcr);
     6.5          }
     6.6      }
     6.7 -    if (VMX_DOMAIN(next))
     6.8 -        vmx_load_state(next);
     6.9  
    6.10      ia64_disable_vhpt_walker();
    6.11      lazy_fp_switch(prev, current);
    6.12 @@ -261,6 +259,7 @@ void context_switch(struct vcpu *prev, s
    6.13  
    6.14      if (VMX_DOMAIN(current)) {
    6.15          vmx_load_all_rr(current);
    6.16 +        vmx_load_state(current);
    6.17          migrate_timer(&current->arch.arch_vmx.vtm.vtm_timer,
    6.18                        current->processor);
    6.19      } else {
     7.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Thu Jan 17 12:05:43 2008 -0700
     7.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Thu Jan 17 12:05:43 2008 -0700
     7.3 @@ -114,7 +114,7 @@ extern void memwrite_v(VCPU * vcpu, thas
     7.4                         size_t s);
     7.5  extern void memwrite_p(VCPU * vcpu, u64 * src, u64 * dest, size_t s);
     7.6  extern void vcpu_load_kernel_regs(VCPU * vcpu);
     7.7 -extern void vmx_switch_rr7(unsigned long, void *, void *);
     7.8 +extern void vmx_switch_rr7(unsigned long, void *, void *, void *);
     7.9  
    7.10  extern void dtlb_fault(VCPU * vcpu, u64 vadr);
    7.11  extern void nested_dtlb(VCPU * vcpu);
     8.1 --- a/xen/include/asm-ia64/vmx_vpd.h	Thu Jan 17 12:05:43 2008 -0700
     8.2 +++ b/xen/include/asm-ia64/vmx_vpd.h	Thu Jan 17 12:05:43 2008 -0700
     8.3 @@ -80,6 +80,24 @@ struct arch_vmx_struct {
     8.4  
     8.5  #define ARCH_VMX_DOMAIN         0       /* Need it to indicate VTi domain */
     8.6  
     8.7 +/* pin/unpin vpd area for PAL call with DTR[] */
     8.8 +void __vmx_vpd_pin(struct vcpu* v);
     8.9 +void __vmx_vpd_unpin(struct vcpu* v); 
    8.10 +
    8.11 +static inline void vmx_vpd_pin(struct vcpu* v)
    8.12 +{
    8.13 +    if (likely(v == current))
    8.14 +        return;
    8.15 +    __vmx_vpd_pin(v);
    8.16 +}
    8.17 +
    8.18 +static inline void vmx_vpd_unpin(struct vcpu* v)
    8.19 +{
    8.20 +    if (likely(v == current))
    8.21 +        return;
    8.22 +    __vmx_vpd_unpin(v);
    8.23 +}
    8.24 +
    8.25  #endif //__ASSEMBLY__
    8.26  
    8.27  // VPD field offset
     9.1 --- a/xen/include/asm-ia64/xenkregs.h	Thu Jan 17 12:05:43 2008 -0700
     9.2 +++ b/xen/include/asm-ia64/xenkregs.h	Thu Jan 17 12:05:43 2008 -0700
     9.3 @@ -9,6 +9,8 @@
     9.4  #define IA64_TR_MAPPED_REGS	5	/* dtr5: vcpu mapped regs */
     9.5  #define	IA64_TR_VHPT		6	/* dtr6: vhpt */
     9.6  
     9.7 +#define IA64_TR_VPD		2	/* itr2: vpd */
     9.8 +
     9.9  #define IA64_DTR_GUEST_KERNEL   7
    9.10  #define IA64_ITR_GUEST_KERNEL   2
    9.11  /* Processor status register bits: */