ia64/xen-unstable

changeset 6867:7f9acc83ffcd

Merge vcpu phase 2
Signed-off-by: Anthony Xu <Anthony.xu@intel.com>
1. Add six members (r4, r5, r6, r7, eml_unat, rfi_pfs) at the end of pt_regs,
so that the VMM for a non-VT domain no longer needs to save/restore
SWITCH_STACK. r4-r7 are saved in case the VMM needs to modify the guest's
r4-r7 values, and eml_unat is saved in case the VMM needs to modify the
guest's unat. rfi_pfs is used to emulate the guest rfi instruction for a VT
domain, and doubles as a dummy member keeping f6, f7, etc. aligned in pt_regs
for a non-VT domain. Code is added to save/restore r4-r7 and eml_unat in
minstate.h and entry.S for non-VT domains, and the code that saved/restored
SWITCH_STACK in ivt.S for non-VT domains is removed. (A sketch of the new
pt_regs tail follows this list.)
2. Originally the guest banked registers of a VT domain were saved directly
in the VPD. They are now saved to pt_regs at VMM entry, for consistency with
non-VT domains, and vmx_vcpu_bsw0 and vmx_vcpu_bsw1 are rewritten to
accommodate this change.
3. After the above changes, all guest registers and NaT bits are saved in the
same place for both VTI and non-VTI domains, so vcpu_get/set_gr and
rse_get/set_gr are merged, and all of these functions can handle guest NaTs
(see the usage example after this list).
4. Merged vcpu_bsw0 and vcpu_bsw1. The VMM now uses vgr[16], vbgr[16], vnat
and vbnat in the mapped_regs_t structure (which is also the VPD) to emulate
the guest bank-switch operation (see the bank-switch sketch after this list).
5. Removed some CONFIG_VTI conditionals and cleaned up some unused structure
members and code.
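
For orientation, a minimal sketch of the new pt_regs tail described in point
1. The member order follows the listing above; the surrounding fields and
exact padding are assumptions, not the verbatim header:

    /* Hypothetical sketch, not the real header: the six members
     * appended to pt_regs by this changeset. */
    struct pt_regs_tail_sketch {
        unsigned long r4;       /* preserved guest regs, saved so the  */
        unsigned long r5;       /* VMM may modify them while emulating */
        unsigned long r6;
        unsigned long r7;
        unsigned long eml_unat; /* guest ar.unat for NaT bookkeeping   */
        unsigned long rfi_pfs;  /* rfi emulation (VT) / alignment pad  */
    };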
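Callers across this changeset switch to the merged, NaT-aware accessors from
point 3. Two representative calls, as they appear in the pal_emul.c and
vmx_hypercall.c hunks below (the trailing 0 in vcpu_set_gr is presumably the
NaT bit to store):

    static void accessor_usage_sketch(VCPU *vcpu)
    {
        u64 gr28, ret = 0;
        vcpu_get_gr_nat(vcpu, 28, &gr28); /* read guest r28, NaT-aware */
        vcpu_set_gr(vcpu, 8, ret, 0);     /* write guest r8, NaT clear */
    }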
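And a minimal C sketch of the merged bank switch from points 2 and 4,
assuming the layout above: the live bank window sits in pt_regs->r16..r31
and the shadow copies in the VPD's vgr[]/vbgr[]. The helper name is
hypothetical, and the real routines also move the r16-r31 slice of eml_unat
into vnat/vbnat, which the new save_banked_regs_to_vpd() in vmx_process.c
below does with inline asm:

    /* Sketch only: switch the guest view to bank 0 (vcpu_bsw1 is the
     * mirror image). Assumes vcpu->arch.privregs points at the
     * mapped_regs_t / VPD page, as described in the phase 1 note. */
    static void vcpu_bsw0_sketch(VCPU *vcpu, struct pt_regs *regs)
    {
        mapped_regs_t *vpd = vcpu->arch.privregs;
        unsigned long *bank = &regs->r16;
        int i;

        for (i = 0; i < 16; i++) {
            vpd->vgr[i] = bank[i];   /* stash the bank 1 values...    */
            bank[i] = vpd->vbgr[i];  /* ...and expose bank 0's values */
        }
        /* NaT bits for r16-r31 swap between regs->eml_unat and
         * vpd->vnat / vpd->vbnat (elided here). */
    }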

Note: Credit accidentally omitted from merge vcpu phase 1, included here:

This patch is based on ver 6723, and I can definitely boot dom0 with it.
The following things are done in this patch.
1. Merge the pt_regs structure.
2. Although the vcpu_info structure had been merged, a non-VT domain used the
pointer vcpu->vcpu_info->arch.privregs while a VT domain used the pointer
vcpu->arch.arch_vmx.vpd. The values of these two pointers differed, which
means VT and non-VT domains still used different privileged-register pages,
and in that case vcpu.c could not be merged. So I merged these two pointers
into one, placed at vcpu->arch.privregs; vcpu->vcpu_info->arch.privregs and
vcpu->arch.arch_vmx.vpd no longer exist. Why put it at vcpu->arch.privregs?
   1. There is one less pointer dereference when accessing the
      privileged-register page.
   2. vcpu->vcpu_info can be accessed by the guest, but the guest cannot
      access the privileged-register page through that address; the guest
      can reach this page only through a separate special mapping. So there
      is no need to expose the pointer to the guest by putting it in the
      vcpu->vcpu_info structure. All accesses to this page go through the
      VCPU(vcpu,y) macro (see the sketch after this list).
3. Merged the following functions:
vcpu_set/get_* for the interruption control registers (cr16 through cr25);
the corresponding vmx_vcpu_set/get_*** functions no longer exist.
vcpu->arch.arch_vmx.in_service[4] no longer exists; vcpu->arch.insvc[4] is
used everywhere (see the example after this list).
4. Cleaned up some unused structure members and code.
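
A plausible shape for the VCPU() accessor after the pointer merge in point 2
(an assumption; the actual definition lives in a header not shown in this
diff), together with a use taken verbatim from the vmx_process.c hunk below:

    /* Assumed definition: one dereference through the unified pointer. */
    #define VCPU(_v, _y)  ((_v)->arch.privregs->_y)

    /* usage, as seen later in this changeset: */
    VCPU(v, irr[0]) |= 1UL << 0x10;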
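And an example of point 3's merged control-register accessors, from the
vmx_interrupt.c hunk below; VMX(vcpu,y) is assumed to be the arch.arch_vmx
analogue of VCPU():

    /* from collect_interruption(): cr_iipa is now read from the
     * per-vcpu arch.arch_vmx copy instead of pt_regs, and the common
     * vcpu_set_iipa() is called on both paths */
    vcpu_set_iipa(vcpu, VMX(vcpu,cr_iipa));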

Signed-off-by: Anthony Xu <Anthony.xu@intel.com>
author djm@kirby.fc.hp.com
date Mon Sep 19 11:08:20 2005 -0600 (2005-09-19)
parents 8d133d172bfd
children f2309ac2648a
files xen/arch/ia64/asm-offsets.c xen/arch/ia64/linux-xen/entry.S xen/arch/ia64/linux-xen/entry.h xen/arch/ia64/linux-xen/minstate.h xen/arch/ia64/linux-xen/unaligned.c xen/arch/ia64/vmx/mmio.c xen/arch/ia64/vmx/pal_emul.c xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vmx_entry.S xen/arch/ia64/vmx/vmx_hypercall.c xen/arch/ia64/vmx/vmx_interrupt.c xen/arch/ia64/vmx/vmx_ivt.S xen/arch/ia64/vmx/vmx_minstate.h xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/vmx/vmx_vcpu.c xen/arch/ia64/vmx/vmx_virt.c xen/arch/ia64/vmx/vtlb.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/ivt.S xen/arch/ia64/xen/privop.c xen/arch/ia64/xen/process.c xen/arch/ia64/xen/vcpu.c xen/include/asm-ia64/ia64_int.h xen/include/asm-ia64/privop.h xen/include/asm-ia64/vcpu.h xen/include/asm-ia64/vmx_vcpu.h xen/include/asm-ia64/vmx_vpd.h xen/include/asm-ia64/xenkregs.h xen/include/asm-ia64/xensystem.h xen/include/public/arch-ia64.h
line diff
     1.1 --- a/xen/arch/ia64/asm-offsets.c	Sun Sep 18 12:18:57 2005 -0600
     1.2 +++ b/xen/arch/ia64/asm-offsets.c	Mon Sep 19 11:08:20 2005 -0600
     1.3 @@ -138,17 +138,6 @@ void foo(void)
     1.4  	DEFINE(IA64_PT_REGS_R14_OFFSET, offsetof (struct pt_regs, r14));
     1.5  	DEFINE(IA64_PT_REGS_R2_OFFSET, offsetof (struct pt_regs, r2));
     1.6  	DEFINE(IA64_PT_REGS_R3_OFFSET, offsetof (struct pt_regs, r3));
     1.7 -#ifdef CONFIG_VTI
     1.8 -	DEFINE(IA64_PT_REGS_R4_OFFSET, offsetof (struct pt_regs, r4));
     1.9 -	DEFINE(IA64_PT_REGS_R5_OFFSET, offsetof (struct pt_regs, r5));
    1.10 -	DEFINE(IA64_PT_REGS_R6_OFFSET, offsetof (struct pt_regs, r6));
    1.11 -	DEFINE(IA64_PT_REGS_R7_OFFSET, offsetof (struct pt_regs, r7));
    1.12 -	DEFINE(IA64_PT_REGS_CR_IIPA_OFFSET, offsetof (struct pt_regs, cr_iipa));
    1.13 -	DEFINE(IA64_PT_REGS_CR_ISR_OFFSET, offsetof (struct pt_regs, cr_isr));
    1.14 -	DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct pt_regs, eml_unat));
    1.15 -	DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct pt_regs, rfi_pfs));
    1.16 -	DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mpta));
    1.17 -#endif  //CONFIG_VTI
    1.18  	DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
    1.19  	DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
    1.20  	DEFINE(IA64_PT_REGS_R18_OFFSET, offsetof (struct pt_regs, r18));
    1.21 @@ -172,6 +161,19 @@ void foo(void)
    1.22  	DEFINE(IA64_PT_REGS_F9_OFFSET, offsetof (struct pt_regs, f9));
    1.23  	DEFINE(IA64_PT_REGS_F10_OFFSET, offsetof (struct pt_regs, f10));
    1.24  	DEFINE(IA64_PT_REGS_F11_OFFSET, offsetof (struct pt_regs, f11));
    1.25 +	DEFINE(IA64_PT_REGS_R4_OFFSET, offsetof (struct pt_regs, r4));
    1.26 +	DEFINE(IA64_PT_REGS_R5_OFFSET, offsetof (struct pt_regs, r5));
    1.27 +	DEFINE(IA64_PT_REGS_R6_OFFSET, offsetof (struct pt_regs, r6));
    1.28 +	DEFINE(IA64_PT_REGS_R7_OFFSET, offsetof (struct pt_regs, r7));
    1.29 +	DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct pt_regs, eml_unat));
    1.30 +	DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct pt_regs, rfi_pfs));
    1.31 +    DEFINE(IA64_VCPU_IIPA_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cr_iipa));
    1.32 +    DEFINE(IA64_VCPU_ISR_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cr_isr));
    1.33 +    DEFINE(IA64_VCPU_CAUSE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cause));
    1.34 +    DEFINE(IA64_VCPU_OPCODE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.opcode));
    1.35 +	DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mpta));
    1.36 +    DEFINE(IA64_PT_REGS_R16_SLOT, (((offsetof(struct pt_regs, r16)-sizeof(struct pt_regs))>>3)&0x3f));
    1.37 +	DEFINE(IA64_VCPU_FLAGS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.flags));
    1.38  
    1.39  	BLANK();
    1.40  
     2.1 --- a/xen/arch/ia64/linux-xen/entry.S	Sun Sep 18 12:18:57 2005 -0600
     2.2 +++ b/xen/arch/ia64/linux-xen/entry.S	Mon Sep 19 11:08:20 2005 -0600
     2.3 @@ -633,10 +633,19 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
     2.4  	// new domains are cloned but not exec'ed so switch to user mode here
     2.5  	cmp.ne pKStk,pUStk=r0,r0
     2.6  #ifdef CONFIG_VTI
     2.7 -	br.cond.spnt ia64_leave_hypervisor
     2.8 +    br.cond.spnt ia64_leave_hypervisor
     2.9  #else // CONFIG_VTI
    2.10 -	br.cond.spnt ia64_leave_kernel
    2.11 +    br.cond.spnt ia64_leave_kernel
    2.12  #endif // CONFIG_VTI
    2.13 +
    2.14 +//    adds r16 = IA64_VCPU_FLAGS_OFFSET, r13
    2.15 +//    ;;
    2.16 +//    ld8 r16 = [r16]
    2.17 +//    ;;
    2.18 +//    cmp.ne p6,p7 = r16, r0
    2.19 +//	(p6) br.cond.spnt ia64_leave_hypervisor
    2.20 +//	(p7) br.cond.spnt ia64_leave_kernel
    2.21 +//    ;;
    2.22  #else
    2.23  .ret8:
    2.24  	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
    2.25 @@ -875,10 +884,16 @@ GLOBAL_ENTRY(ia64_leave_kernel)
    2.26  #ifdef XEN
    2.27  	alloc loc0=ar.pfs,0,1,1,0
    2.28  	adds out0=16,r12
    2.29 +    adds r7 = PT(EML_UNAT)+16,r12
    2.30  	;;
    2.31 +    ld8 r7 = [r7]
    2.32  (p6)	br.call.sptk.many b0=deliver_pending_interrupt
    2.33 +    ;;
    2.34  	mov ar.pfs=loc0
    2.35 +    mov ar.unat=r7  /* load eml_unat  */
    2.36  	mov r31=r0
    2.37 +
    2.38 +
    2.39  #else
    2.40  	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
    2.41  	;;
    2.42 @@ -957,6 +972,23 @@ GLOBAL_ENTRY(ia64_leave_kernel)
    2.43  	ldf.fill f6=[r2],PT(F7)-PT(F6)
    2.44  	;;
    2.45  	ldf.fill f7=[r2],PT(F11)-PT(F7)
    2.46 +#ifdef XEN
    2.47 +	ldf.fill f8=[r3],PT(R5)-PT(F8)
    2.48 +	;;
    2.49 +	ldf.fill f11=[r2],PT(R4)-PT(F11)
    2.50 +	mov ar.ccv=r15
    2.51 +	;;
    2.52 +	ld8.fill r4=[r2],16
    2.53 +	ld8.fill r5=[r3],16
    2.54 +	;;
    2.55 +	ld8.fill r6=[r2]
    2.56 +	ld8.fill r7=[r3]
    2.57 +	;;
    2.58 +	srlz.d	// ensure that inter. collection is off (VHPT is don't care, since text is pinned)
    2.59 +    ;;
    2.60 +	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
    2.61 +	;;
    2.62 +#else
    2.63  	ldf.fill f8=[r3],32
    2.64  	;;
    2.65  	srlz.d	// ensure that inter. collection is off (VHPT is don't care, since text is pinned)
    2.66 @@ -965,6 +997,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
    2.67  	ldf.fill f11=[r2]
    2.68  	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
    2.69  	;;
    2.70 +#endif
    2.71  #ifdef XEN
    2.72  (pUStk) movl r18=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
    2.73  (pUStk) ld8 r18=[r18]
     3.1 --- a/xen/arch/ia64/linux-xen/entry.h	Sun Sep 18 12:18:57 2005 -0600
     3.2 +++ b/xen/arch/ia64/linux-xen/entry.h	Mon Sep 19 11:08:20 2005 -0600
     3.3 @@ -23,19 +23,8 @@
     3.4  
     3.5  #define PT(f)		(IA64_PT_REGS_##f##_OFFSET)
     3.6  #define SW(f)		(IA64_SWITCH_STACK_##f##_OFFSET)
     3.7 -
     3.8 -#ifdef XEN
     3.9 -#ifdef CONFIG_VTI
    3.10 -#define PRED_EMUL		2 /* Need to save r4-r7 for inst emulation */
    3.11 -#define PRED_NON_EMUL		3 /* No need to save r4-r7 for normal path */
    3.12 -#define PRED_BN0		6 /* Guest is in bank 0 */
    3.13 -#define PRED_BN1		7 /* Guest is in bank 1 */
    3.14 -# define pEml		PASTE(p,PRED_EMUL)
    3.15 -# define pNonEml	PASTE(p,PRED_NON_EMUL)
    3.16 -# define pBN0		PASTE(p,PRED_BN0)
    3.17 -# define pBN1		PASTE(p,PRED_BN1)
    3.18 +#ifdef  XEN
    3.19  #define VPD(f)      (VPD_##f##_START_OFFSET)
    3.20 -#endif // CONFIG_VTI
    3.21  #endif
    3.22  
    3.23  #define PT_REGS_SAVES(off)			\
     4.1 --- a/xen/arch/ia64/linux-xen/minstate.h	Sun Sep 18 12:18:57 2005 -0600
     4.2 +++ b/xen/arch/ia64/linux-xen/minstate.h	Mon Sep 19 11:08:20 2005 -0600
     4.3 @@ -241,16 +241,25 @@
     4.4  	stf.spill [r2]=f8,32;			\
     4.5  	stf.spill [r3]=f9,32;			\
     4.6  	;;					\
     4.7 -	stf.spill [r2]=f10;			\
     4.8 -	stf.spill [r3]=f11;			\
     4.9 -	adds r25=PT(B7)-PT(F11),r3;		\
    4.10 +	stf.spill [r2]=f10,32;			\
    4.11 +	stf.spill [r3]=f11,24;			\
    4.12  	;;					\
    4.13 +.mem.offset 0,0; st8.spill [r2]=r4,16;		\
    4.14 +.mem.offset 8,0; st8.spill [r3]=r5,16;		\
    4.15 +	;;					\
    4.16 +.mem.offset 0,0; st8.spill [r2]=r6,16;		\
    4.17 +.mem.offset 8,0; st8.spill [r3]=r7;		\
    4.18 +    adds r25=PT(B7)-PT(R7),r3;     \
    4.19 +    ;;                  \
    4.20  	st8 [r24]=r18,16;       /* b6 */	\
    4.21  	st8 [r25]=r19,16;       /* b7 */	\
    4.22  	;;					\
    4.23  	st8 [r24]=r9;        	/* ar.csd */	\
    4.24 +    mov r26=ar.unat;            \
    4.25 +	;;      \
    4.26  	st8 [r25]=r10;      	/* ar.ssd */	\
    4.27 -	;;
    4.28 +    st8 [r2]=r26;       /* eml_unat */ \
    4.29 +    ;;
    4.30  
    4.31  #define SAVE_MIN_WITH_COVER	DO_SAVE_MIN(cover, mov r30=cr.ifs,)
    4.32  #define SAVE_MIN_WITH_COVER_R19	DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
     5.1 --- a/xen/arch/ia64/linux-xen/unaligned.c	Sun Sep 18 12:18:57 2005 -0600
     5.2 +++ b/xen/arch/ia64/linux-xen/unaligned.c	Mon Sep 19 11:08:20 2005 -0600
     5.3 @@ -201,7 +201,8 @@ static u16 gr_info[32]={
     5.4  
     5.5  	RPT(r1), RPT(r2), RPT(r3),
     5.6  
     5.7 -#if defined(XEN) && defined(CONFIG_VTI)
     5.8 +//#if defined(XEN) && defined(CONFIG_VTI)
     5.9 +#if defined(XEN)
    5.10  	RPT(r4), RPT(r5), RPT(r6), RPT(r7),
    5.11  #else   //CONFIG_VTI
    5.12  	RSW(r4), RSW(r5), RSW(r6), RSW(r7),
    5.13 @@ -295,7 +296,8 @@ rotate_reg (unsigned long sor, unsigned 
    5.14  	return reg;
    5.15  }
    5.16  
    5.17 -#if defined(XEN) && defined(CONFIG_VTI)
    5.18 +//#if defined(XEN) && defined(CONFIG_VTI)
    5.19 +#if defined(XEN)
    5.20  void
    5.21  set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
    5.22  {
    5.23 @@ -402,12 +404,14 @@ get_rse_reg (struct pt_regs *regs, unsig
    5.24          bspstore = ia64_get_bspstore();
    5.25      }
    5.26      *val=*addr;
    5.27 -    if(bspstore < rnat_addr){
    5.28 -        *nat=!!(ia64_get_rnat()&nat_mask);
    5.29 -    }else{
    5.30 -        *nat = !!((*rnat_addr)&nat_mask);
    5.31 +    if(nat){
    5.32 +        if(bspstore < rnat_addr){
    5.33 +            *nat=!!(ia64_get_rnat()&nat_mask);
    5.34 +        }else{
    5.35 +            *nat = !!((*rnat_addr)&nat_mask);
    5.36 +        }
    5.37 +        ia64_set_rsc(old_rsc);
    5.38      }
    5.39 -    ia64_set_rsc(old_rsc);
    5.40  }
    5.41  
    5.42  #else // CONFIG_VTI
     6.1 --- a/xen/arch/ia64/vmx/mmio.c	Sun Sep 18 12:18:57 2005 -0600
     6.2 +++ b/xen/arch/ia64/vmx/mmio.c	Mon Sep 19 11:08:20 2005 -0600
     6.3 @@ -419,10 +419,10 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
     6.4          size=(inst.M1.x6&0x3);
     6.5          if((inst.M1.x6>>2)>0xb){      // write
     6.6              dir=IOREQ_WRITE;     //write
     6.7 -            vmx_vcpu_get_gr(vcpu,inst.M4.r2,&data);
     6.8 +            vcpu_get_gr_nat(vcpu,inst.M4.r2,&data);
     6.9          }else if((inst.M1.x6>>2)<0xb){   //  read
    6.10              dir=IOREQ_READ;
    6.11 -            vmx_vcpu_get_gr(vcpu,inst.M1.r1,&value);
    6.12 +            vcpu_get_gr_nat(vcpu,inst.M1.r1,&value);
    6.13          }
    6.14      }
    6.15      // Integer Load + Reg update
    6.16 @@ -430,11 +430,11 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
    6.17          inst_type = SL_INTEGER;
    6.18          dir = IOREQ_READ;     //write
    6.19          size = (inst.M2.x6&0x3);
    6.20 -        vmx_vcpu_get_gr(vcpu,inst.M2.r1,&value);
    6.21 -        vmx_vcpu_get_gr(vcpu,inst.M2.r3,&temp);
    6.22 -        vmx_vcpu_get_gr(vcpu,inst.M2.r2,&post_update);
    6.23 +        vcpu_get_gr_nat(vcpu,inst.M2.r1,&value);
    6.24 +        vcpu_get_gr_nat(vcpu,inst.M2.r3,&temp);
    6.25 +        vcpu_get_gr_nat(vcpu,inst.M2.r2,&post_update);
    6.26          temp += post_update;
    6.27 -        vmx_vcpu_set_gr(vcpu,inst.M2.r3,temp,0);
    6.28 +        vcpu_set_gr(vcpu,inst.M2.r3,temp,0);
    6.29      }
    6.30      // Integer Load/Store + Imm update
    6.31      else if(inst.M3.major==5){
    6.32 @@ -442,25 +442,25 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
    6.33          size=(inst.M3.x6&0x3);
    6.34          if((inst.M5.x6>>2)>0xb){      // write
    6.35              dir=IOREQ_WRITE;     //write
    6.36 -            vmx_vcpu_get_gr(vcpu,inst.M5.r2,&data);
    6.37 -            vmx_vcpu_get_gr(vcpu,inst.M5.r3,&temp);
    6.38 +            vcpu_get_gr_nat(vcpu,inst.M5.r2,&data);
    6.39 +            vcpu_get_gr_nat(vcpu,inst.M5.r3,&temp);
    6.40              post_update = (inst.M5.i<<7)+inst.M5.imm7;
    6.41              if(inst.M5.s)
    6.42                  temp -= post_update;
    6.43              else
    6.44                  temp += post_update;
    6.45 -            vmx_vcpu_set_gr(vcpu,inst.M5.r3,temp,0);
    6.46 +            vcpu_set_gr(vcpu,inst.M5.r3,temp,0);
    6.47  
    6.48          }else if((inst.M3.x6>>2)<0xb){   //  read
    6.49              dir=IOREQ_READ;
    6.50 -            vmx_vcpu_get_gr(vcpu,inst.M3.r1,&value);
    6.51 -            vmx_vcpu_get_gr(vcpu,inst.M3.r3,&temp);
    6.52 +            vcpu_get_gr_nat(vcpu,inst.M3.r1,&value);
    6.53 +            vcpu_get_gr_nat(vcpu,inst.M3.r3,&temp);
    6.54              post_update = (inst.M3.i<<7)+inst.M3.imm7;
    6.55              if(inst.M3.s)
    6.56                  temp -= post_update;
    6.57              else
    6.58                  temp += post_update;
    6.59 -            vmx_vcpu_set_gr(vcpu,inst.M3.r3,temp,0);
    6.60 +            vcpu_set_gr(vcpu,inst.M3.r3,temp,0);
    6.61  
    6.62          }
    6.63      }
    6.64 @@ -488,7 +488,7 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
    6.65              data = (value & 0xffffffff00000000U) | (data & 0xffffffffU);
    6.66  
    6.67          if(inst_type==SL_INTEGER){       //gp
    6.68 -            vmx_vcpu_set_gr(vcpu,inst.M1.r1,data,0);
    6.69 +            vcpu_set_gr(vcpu,inst.M1.r1,data,0);
    6.70          }else{
    6.71              panic("Don't support ldfd now !");
    6.72  /*            switch(inst.M6.f1){
     7.1 --- a/xen/arch/ia64/vmx/pal_emul.c	Sun Sep 18 12:18:57 2005 -0600
     7.2 +++ b/xen/arch/ia64/vmx/pal_emul.c	Mon Sep 19 11:08:20 2005 -0600
     7.3 @@ -24,18 +24,18 @@ static void
     7.4  get_pal_parameters (VCPU *vcpu, UINT64 *gr29,
     7.5  			UINT64 *gr30, UINT64 *gr31) {
     7.6  
     7.7 -  	vmx_vcpu_get_gr(vcpu,29,gr29);
     7.8 -  	vmx_vcpu_get_gr(vcpu,30,gr30); 
     7.9 -  	vmx_vcpu_get_gr(vcpu,31,gr31);
    7.10 +  	vcpu_get_gr_nat(vcpu,29,gr29);
    7.11 +  	vcpu_get_gr_nat(vcpu,30,gr30); 
    7.12 +  	vcpu_get_gr_nat(vcpu,31,gr31);
    7.13  }
    7.14  
    7.15  static void
    7.16  set_pal_result (VCPU *vcpu,struct ia64_pal_retval result) {
    7.17  
    7.18 -	vmx_vcpu_set_gr(vcpu,8, result.status,0);
    7.19 -	vmx_vcpu_set_gr(vcpu,9, result.v0,0);
    7.20 -	vmx_vcpu_set_gr(vcpu,10, result.v1,0);
    7.21 -	vmx_vcpu_set_gr(vcpu,11, result.v2,0);
    7.22 +	vcpu_set_gr(vcpu,8, result.status,0);
    7.23 +	vcpu_set_gr(vcpu,9, result.v0,0);
    7.24 +	vcpu_set_gr(vcpu,10, result.v1,0);
    7.25 +	vcpu_set_gr(vcpu,11, result.v2,0);
    7.26  }
    7.27  
    7.28  
    7.29 @@ -45,7 +45,7 @@ pal_cache_flush (VCPU *vcpu) {
    7.30  	struct ia64_pal_retval result;
    7.31  
    7.32  	get_pal_parameters (vcpu, &gr29, &gr30, &gr31);
    7.33 -	vmx_vcpu_get_gr(vcpu,28,&gr28);
    7.34 +	vcpu_get_gr_nat(vcpu,28,&gr28);
    7.35  
    7.36  	/* Always call Host Pal in int=1 */
    7.37  	gr30 = gr30 &(~(0x2UL));
    7.38 @@ -236,7 +236,7 @@ pal_emul( VCPU *vcpu) {
    7.39  	struct ia64_pal_retval result;
    7.40  
    7.41  
    7.42 -	vmx_vcpu_get_gr(vcpu,28,&gr28);  //bank1
    7.43 +	vcpu_get_gr_nat(vcpu,28,&gr28);  //bank1
    7.44  
    7.45  	switch (gr28) {
    7.46  		case PAL_CACHE_FLUSH:
     8.1 --- a/xen/arch/ia64/vmx/vmmu.c	Sun Sep 18 12:18:57 2005 -0600
     8.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Mon Sep 19 11:08:20 2005 -0600
     8.3 @@ -713,7 +713,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
     8.4      hcb = vmx_vcpu_get_vtlb(vcpu);
     8.5      vrr=vmx_vcpu_rr(vcpu,vadr);
     8.6      regs=vcpu_regs(vcpu);
     8.7 -    pt_isr.val=regs->cr_isr;
     8.8 +    pt_isr.val=VMX(vcpu,cr_isr);
     8.9      visr.val=0;
    8.10      visr.ei=pt_isr.ei;
    8.11      visr.ir=pt_isr.ir;
     9.1 --- a/xen/arch/ia64/vmx/vmx_entry.S	Sun Sep 18 12:18:57 2005 -0600
     9.2 +++ b/xen/arch/ia64/vmx/vmx_entry.S	Mon Sep 19 11:08:20 2005 -0600
     9.3 @@ -83,7 +83,6 @@ GLOBAL_ENTRY(ia64_leave_nested)
     9.4  	;;
     9.5  	adds r21=PT(PR)+16,r12
     9.6  	;;
     9.7 -
     9.8  	lfetch [r21],PT(CR_IPSR)-PT(PR)
     9.9  	adds r2=PT(B6)+16,r12
    9.10  	adds r3=PT(R16)+16,r12
    9.11 @@ -216,138 +215,90 @@ GLOBAL_ENTRY(ia64_leave_hypervisor)
    9.12      ;;
    9.13      alloc loc0=ar.pfs,0,1,1,0
    9.14      adds out0=16,r12
    9.15 +    adds r7 = PT(EML_UNAT)+16,r12
    9.16      ;;
    9.17 +    ld8 r7 = [r7]
    9.18      br.call.sptk.many b0=leave_hypervisor_tail
    9.19      ;;
    9.20      mov ar.pfs=loc0
    9.21 -    adds r8=IA64_VPD_BASE_OFFSET,r13
    9.22 -    ;;
    9.23 -    ld8 r8=[r8]
    9.24 -    ;;
    9.25 -    adds r9=VPD(VPSR),r8
    9.26 -    ;;
    9.27 -    ld8 r9=[r9]
    9.28 -    ;;
    9.29 -    tbit.z pBN0,pBN1=r9,IA64_PSR_BN_BIT
    9.30 -    ;;
    9.31 -(pBN0) add r7=VPD(VBNAT),r8;
    9.32 -(pBN1) add r7=VPD(VNAT),r8;
    9.33 -    ;;
    9.34 -    ld8 r7=[r7]
    9.35 -    ;;
    9.36      mov ar.unat=r7
    9.37 -(pBN0) add r4=VPD(VBGR),r8;
    9.38 -(pBN1) add r4=VPD(VGR),r8;
    9.39 -(pBN0) add r5=VPD(VBGR)+0x8,r8;
    9.40 -(pBN1) add r5=VPD(VGR)+0x8,r8;
    9.41 -    ;;
    9.42 -    ld8.fill r16=[r4],16
    9.43 -    ld8.fill r17=[r5],16
    9.44 -    ;;
    9.45 -    ld8.fill r18=[r4],16
    9.46 -    ld8.fill r19=[r5],16
    9.47 -    ;;
    9.48 -    ld8.fill r20=[r4],16
    9.49 -    ld8.fill r21=[r5],16
    9.50 -    ;;
    9.51 -    ld8.fill r22=[r4],16
    9.52 -    ld8.fill r23=[r5],16
    9.53 -    ;;
    9.54 -    ld8.fill r24=[r4],16
    9.55 -    ld8.fill r25=[r5],16
    9.56 -    ;;
    9.57 -    ld8.fill r26=[r4],16
    9.58 -    ld8.fill r27=[r5],16
    9.59 -    ;;
    9.60 -    ld8.fill r28=[r4],16
    9.61 -    ld8.fill r29=[r5],16
    9.62 -    ;;
    9.63 -    ld8.fill r30=[r4],16
    9.64 -    ld8.fill r31=[r5],16
    9.65 -    ;;
    9.66 -    bsw.0
    9.67 -    ;;
    9.68 -    mov r18=r8      //vpd
    9.69 -    mov r19=r9      //vpsr
    9.70      adds r20=PT(PR)+16,r12
    9.71      ;;
    9.72      lfetch [r20],PT(CR_IPSR)-PT(PR)
    9.73 -    adds r16=PT(B6)+16,r12
    9.74 -    adds r17=PT(B7)+16,r12
    9.75 +    adds r2 = PT(B6)+16,r12
    9.76 +    adds r3 = PT(B7)+16,r12
    9.77      ;;
    9.78      lfetch [r20]
    9.79 -    mov r21=r13		// get current
    9.80 -    ;;
    9.81 -    ld8 r30=[r16],16      // load b6
    9.82 -    ld8 r31=[r17],16      // load b7
    9.83 -    add r20=PT(EML_UNAT)+16,r12
    9.84 -    ;;
    9.85 -    ld8 r29=[r20]       //load ar_unat
    9.86 -    mov b6=r30
    9.87 -    mov b7=r31
    9.88 -    ld8 r30=[r16],16    //load ar_csd
    9.89 -    ld8 r31=[r17],16    //load ar_ssd
    9.90 -    ;;
    9.91 -    mov ar.unat=r29
    9.92 -    mov ar.csd=r30
    9.93 -    mov ar.ssd=r31
    9.94 -    ;;
    9.95 -    ld8.fill r8=[r16],16    //load r8
    9.96 -    ld8.fill r9=[r17],16    //load r9
    9.97 -    ;;
    9.98 -    ld8.fill r10=[r16],PT(R1)-PT(R10)    //load r10
    9.99 -    ld8.fill r11=[r17],PT(R12)-PT(R11)    //load r11
   9.100 -    ;;
   9.101 -    ld8.fill r1=[r16],16    //load r1
   9.102 -    ld8.fill r12=[r17],16    //load r12
   9.103      ;;
   9.104 -    ld8.fill r13=[r16],16    //load r13
   9.105 -    ld8 r30=[r17],16    //load ar_fpsr
   9.106 -    ;;
   9.107 -    ld8.fill r15=[r16],16    //load r15
   9.108 -    ld8.fill r14=[r17],16    //load r14
   9.109 -    mov ar.fpsr=r30
   9.110 -    ;;
   9.111 -    ld8.fill r2=[r16],16    //load r2
   9.112 -    ld8.fill r3=[r17],16    //load r3
   9.113 -    ;;
   9.114 -/*
   9.115 -(pEml) ld8.fill r4=[r16],16    //load r4
   9.116 -(pEml) ld8.fill r5=[r17],16    //load r5
   9.117 +    ld8 r24=[r2],16        /* B6 */
   9.118 +    ld8 r25=[r3],16        /* B7 */
   9.119      ;;
   9.120 -(pEml) ld8.fill r6=[r16],PT(AR_CCV)-PT(R6)   //load r6
   9.121 -(pEml) ld8.fill r7=[r17],PT(F7)-PT(R7)   //load r7
   9.122 -    ;;
   9.123 -(pNonEml) adds r16=PT(AR_CCV)-PT(R4),r16
   9.124 -(pNonEml) adds r17=PT(F7)-PT(R5),r17
   9.125 +    ld8 r26=[r2],16        /* ar_csd */
   9.126 +    ld8 r27=[r3],16        /* ar_ssd */
   9.127 +    mov b6 = r24
   9.128      ;;
   9.129 -*/
   9.130 -    ld8.fill r4=[r16],16    //load r4
   9.131 -    ld8.fill r5=[r17],16    //load r5
   9.132 -     ;;
   9.133 -    ld8.fill r6=[r16],PT(AR_CCV)-PT(R6)   //load r6
   9.134 -    ld8.fill r7=[r17],PT(F7)-PT(R7)   //load r7
   9.135 +    ld8.fill r8=[r2],16
   9.136 +    ld8.fill r9=[r3],16
   9.137 +    mov b7 = r25
   9.138      ;;
   9.139 -
   9.140 -    ld8 r30=[r16],PT(F6)-PT(AR_CCV)
   9.141 +    mov ar.csd = r26
   9.142 +    mov ar.ssd = r27
   9.143 +    ;;
   9.144 +    ld8.fill r10=[r2],PT(R15)-PT(R10)
   9.145 +    ld8.fill r11=[r3],PT(R14)-PT(R11)
   9.146 +    ;;
   9.147 +    ld8.fill r15=[r2],PT(R16)-PT(R15)
   9.148 +    ld8.fill r14=[r3],PT(R17)-PT(R14)
   9.149 +    ;;
   9.150 +    ld8.fill r16=[r2],16
   9.151 +    ld8.fill r17=[r3],16
   9.152 +    ;;
   9.153 +    ld8.fill r18=[r2],16
   9.154 +    ld8.fill r19=[r3],16
   9.155 +    ;;
   9.156 +    ld8.fill r20=[r2],16
   9.157 +    ld8.fill r21=[r3],16
   9.158 +    ;;
   9.159 +    ld8.fill r22=[r2],16
   9.160 +    ld8.fill r23=[r3],16
   9.161 +    ;;
   9.162 +    ld8.fill r24=[r2],16
   9.163 +    ld8.fill r25=[r3],16
   9.164 +    ;;
   9.165 +    ld8.fill r26=[r2],16
   9.166 +    ld8.fill r27=[r3],16
   9.167 +    ;;
   9.168 +    ld8.fill r28=[r2],16
   9.169 +    ld8.fill r29=[r3],16
   9.170 +    ;;
   9.171 +    ld8.fill r30=[r2],PT(F6)-PT(R30)
   9.172 +    ld8.fill r31=[r3],PT(F7)-PT(R31)
   9.173 +    ;;
   9.174      rsm psr.i | psr.ic  // initiate turning off of interrupt and interruption collection
   9.175 +    invala          // invalidate ALAT
   9.176 +    ;;
   9.177 +    ldf.fill f6=[r2],32
   9.178 +    ldf.fill f7=[r3],32
   9.179 +    ;;
   9.180 +    ldf.fill f8=[r2],32
   9.181 +    ldf.fill f9=[r3],32
   9.182 +    ;;
   9.183 +    ldf.fill f10=[r2],32
   9.184 +    ldf.fill f11=[r3],24
   9.185 +    ;;
   9.186 +    ld8.fill r4=[r2],16    //load r4
   9.187 +    ld8.fill r5=[r3],16    //load r5
   9.188 +    ;;
   9.189 +    ld8.fill r6=[r2]    //load r6
   9.190 +    ld8.fill r7=[r3]    //load r7
   9.191      ;;
   9.192      srlz.i          // ensure interruption collection is off
   9.193      ;;
   9.194 -    invala          // invalidate ALAT
   9.195 -    ;;
   9.196 -    ldf.fill f6=[r16],32
   9.197 -    ldf.fill f7=[r17],32
   9.198 +    bsw.0
   9.199      ;;
   9.200 -    ldf.fill f8=[r16],32
   9.201 -    ldf.fill f9=[r17],32
   9.202 -    ;;
   9.203 -    ldf.fill f10=[r16]
   9.204 -    ldf.fill f11=[r17]
   9.205 -    ;;
   9.206 -    mov ar.ccv=r30
   9.207 -    adds r16=PT(CR_IPSR)-PT(F10),r16
   9.208 -    adds r17=PT(CR_IIP)-PT(F11),r17
   9.209 +    adds r16 = PT(CR_IPSR)+16,r12
   9.210 +    adds r17 = PT(CR_IIP)+16,r12
   9.211 +    mov r21=r13		// get current
   9.212      ;;
   9.213      ld8 r31=[r16],16    // load cr.ipsr
   9.214      ld8 r30=[r17],16    // load cr.iip
   9.215 @@ -358,13 +309,26 @@ GLOBAL_ENTRY(ia64_leave_hypervisor)
   9.216      ld8 r27=[r16],16    // load ar.pfs
   9.217      ld8 r26=[r17],16    // load ar.rsc
   9.218      ;;
   9.219 -    ld8 r25=[r16],16    // load ar.rnat (may be garbage)
   9.220 -    ld8 r24=[r17],16// load ar.bspstore (may be garbage)
   9.221 +    ld8 r25=[r16],16    // load ar.rnat
   9.222 +    ld8 r24=[r17],16    // load ar.bspstore
   9.223      ;;
   9.224      ld8 r23=[r16],16    // load predicates
   9.225 -    ld8 r22=[r17],PT(RFI_PFS)-PT(B0)    // load b0
   9.226 +    ld8 r22=[r17],16    // load b0
   9.227      ;;
   9.228      ld8 r20=[r16],16    // load ar.rsc value for "loadrs"
   9.229 +    ld8.fill r1=[r17],16    //load r1
   9.230 +    ;;
   9.231 +    ld8.fill r12=[r16],16    //load r12
   9.232 +    ld8.fill r13=[r17],PT(R2)-PT(R13)    //load r13
   9.233 +    ;;
   9.234 +    ld8 r19=[r16],PT(R3)-PT(AR_FPSR)    //load ar_fpsr
   9.235 +    ld8.fill r2=[r17],PT(AR_CCV)-PT(R2)    //load r2
   9.236 +    ;;
   9.237 +    ld8.fill r3=[r16]    //load r3
   9.238 +    ld8 r18=[r17],PT(RFI_PFS)-PT(AR_CCV)           //load ar_ccv
   9.239 +    ;;
   9.240 +    mov ar.fpsr=r19
   9.241 +    mov ar.ccv=r18
   9.242      ;;
   9.243  //rbs_switch
   9.244      // loadrs has already been shifted
   9.245 @@ -389,7 +353,13 @@ GLOBAL_ENTRY(ia64_leave_hypervisor)
   9.246      ;;
   9.247  vmx_dorfirfi_back:
   9.248      mov ar.pfs=r27
   9.249 -
   9.250 +    adds r18=IA64_VPD_BASE_OFFSET,r21
   9.251 +    ;;
   9.252 +    ld8 r18=[r18]   //vpd
   9.253 +    ;;
   9.254 +    adds r19=VPD(VPSR),r18
   9.255 +    ;;
   9.256 +    ld8 r19=[r19]        //vpsr
   9.257  //vsa_sync_write_start
   9.258      movl r20=__vsa_base
   9.259      ;;
    10.1 --- a/xen/arch/ia64/vmx/vmx_hypercall.c	Sun Sep 18 12:18:57 2005 -0600
    10.2 +++ b/xen/arch/ia64/vmx/vmx_hypercall.c	Mon Sep 19 11:08:20 2005 -0600
    10.3 @@ -35,7 +35,7 @@
    10.4  void hyper_not_support(void)
    10.5  {
    10.6      VCPU *vcpu=current;
    10.7 -    vmx_vcpu_set_gr(vcpu, 8, -1, 0);
    10.8 +    vcpu_set_gr(vcpu, 8, -1, 0);
    10.9      vmx_vcpu_increment_iip(vcpu);
   10.10  }
   10.11  
   10.12 @@ -43,12 +43,12 @@ void hyper_mmu_update(void)
   10.13  {
   10.14      VCPU *vcpu=current;
   10.15      u64 r32,r33,r34,r35,ret;
   10.16 -    vmx_vcpu_get_gr(vcpu,16,&r32);
   10.17 -    vmx_vcpu_get_gr(vcpu,17,&r33);
   10.18 -    vmx_vcpu_get_gr(vcpu,18,&r34);
   10.19 -    vmx_vcpu_get_gr(vcpu,19,&r35);
   10.20 +    vcpu_get_gr_nat(vcpu,16,&r32);
   10.21 +    vcpu_get_gr_nat(vcpu,17,&r33);
   10.22 +    vcpu_get_gr_nat(vcpu,18,&r34);
   10.23 +    vcpu_get_gr_nat(vcpu,19,&r35);
   10.24      ret=do_mmu_update((mmu_update_t*)r32,r33,r34,r35);
   10.25 -    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
   10.26 +    vcpu_set_gr(vcpu, 8, ret, 0);
   10.27      vmx_vcpu_increment_iip(vcpu);
   10.28  }
   10.29  
   10.30 @@ -65,18 +65,18 @@ unsigned long __hypercall_create_continu
   10.31      if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
   10.32  	panic("PREEMPT happen in multicall\n");	// Not support yet
   10.33      } else {
   10.34 -	vmx_vcpu_set_gr(vcpu, 15, op, 0);
   10.35 +	vcpu_set_gr(vcpu, 15, op, 0);
   10.36  	for ( i = 0; i < nr_args; i++) {
   10.37  	    switch (i) {
   10.38 -	    case 0: vmx_vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long), 0);
   10.39 -		    break;
   10.40 -	    case 1: vmx_vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long), 0);
   10.41 +	    case 0: vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long), 0);
   10.42  		    break;
   10.43 -	    case 2: vmx_vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long), 0);
   10.44 +	    case 1: vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long), 0);
   10.45  		    break;
   10.46 -	    case 3: vmx_vcpu_set_gr(vcpu, 19, va_arg(args, unsigned long), 0);
   10.47 +	    case 2: vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long), 0);
   10.48  		    break;
   10.49 -	    case 4: vmx_vcpu_set_gr(vcpu, 20, va_arg(args, unsigned long), 0);
   10.50 +	    case 3: vcpu_set_gr(vcpu, 19, va_arg(args, unsigned long), 0);
   10.51 +		    break;
   10.52 +	    case 4: vcpu_set_gr(vcpu, 20, va_arg(args, unsigned long), 0);
   10.53  		    break;
   10.54  	    default: panic("Too many args for hypercall continuation\n");
   10.55  		    break;
   10.56 @@ -93,15 +93,15 @@ void hyper_dom_mem_op(void)
   10.57      VCPU *vcpu=current;
   10.58      u64 r32,r33,r34,r35,r36;
   10.59      u64 ret;
   10.60 -    vmx_vcpu_get_gr(vcpu,16,&r32);
   10.61 -    vmx_vcpu_get_gr(vcpu,17,&r33);
   10.62 -    vmx_vcpu_get_gr(vcpu,18,&r34);
   10.63 -    vmx_vcpu_get_gr(vcpu,19,&r35);
   10.64 -    vmx_vcpu_get_gr(vcpu,20,&r36);
   10.65 +    vcpu_get_gr_nat(vcpu,16,&r32);
   10.66 +    vcpu_get_gr_nat(vcpu,17,&r33);
   10.67 +    vcpu_get_gr_nat(vcpu,18,&r34);
   10.68 +    vcpu_get_gr_nat(vcpu,19,&r35);
   10.69 +    vcpu_get_gr_nat(vcpu,20,&r36);
   10.70  //    ret=do_dom_mem_op(r32,(u64 *)r33,r34,r35,r36);
   10.71      ret = 0;
   10.72      printf("do_dom_mem return value: %lx\n", ret);
   10.73 -    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
   10.74 +    vcpu_set_gr(vcpu, 8, ret, 0);
   10.75  
   10.76      /* Hard to define a special return value to indicate hypercall restart.
   10.77       * So just add a new mark, which is SMP safe
   10.78 @@ -117,9 +117,9 @@ void hyper_sched_op(void)
   10.79  {
   10.80      VCPU *vcpu=current;
   10.81      u64 r32,ret;
   10.82 -    vmx_vcpu_get_gr(vcpu,16,&r32);
   10.83 +    vcpu_get_gr_nat(vcpu,16,&r32);
   10.84      ret=do_sched_op(r32);
   10.85 -    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
   10.86 +    vcpu_set_gr(vcpu, 8, ret, 0);
   10.87  
   10.88      vmx_vcpu_increment_iip(vcpu);
   10.89  }
   10.90 @@ -128,9 +128,9 @@ void hyper_dom0_op(void)
   10.91  {
   10.92      VCPU *vcpu=current;
   10.93      u64 r32,ret;
   10.94 -    vmx_vcpu_get_gr(vcpu,16,&r32);
   10.95 +    vcpu_get_gr_nat(vcpu,16,&r32);
   10.96      ret=do_dom0_op((dom0_op_t *)r32);
   10.97 -    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
   10.98 +    vcpu_set_gr(vcpu, 8, ret, 0);
   10.99  
  10.100      vmx_vcpu_increment_iip(vcpu);
  10.101  }
  10.102 @@ -139,9 +139,9 @@ void hyper_event_channel_op(void)
  10.103  {
  10.104      VCPU *vcpu=current;
  10.105      u64 r32,ret;
  10.106 -    vmx_vcpu_get_gr(vcpu,16,&r32);
  10.107 +    vcpu_get_gr_nat(vcpu,16,&r32);
  10.108      ret=do_event_channel_op((evtchn_op_t *)r32);
  10.109 -    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
  10.110 +    vcpu_set_gr(vcpu, 8, ret, 0);
  10.111      vmx_vcpu_increment_iip(vcpu);
  10.112  }
  10.113  
  10.114 @@ -149,9 +149,9 @@ void hyper_xen_version(void)
  10.115  {
  10.116      VCPU *vcpu=current;
  10.117      u64 r32,ret;
  10.118 -    vmx_vcpu_get_gr(vcpu,16,&r32);
  10.119 +    vcpu_get_gr_nat(vcpu,16,&r32);
  10.120      ret=do_xen_version((int )r32);
  10.121 -    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
  10.122 +    vcpu_set_gr(vcpu, 8, ret, 0);
  10.123      vmx_vcpu_increment_iip(vcpu);
  10.124  }
  10.125  
  10.126 @@ -174,10 +174,10 @@ void hyper_lock_page(void)
  10.127  //TODO:
  10.128      VCPU *vcpu=current;
  10.129      u64 va,lock, ret;
  10.130 -    vmx_vcpu_get_gr(vcpu,16,&va);
  10.131 -    vmx_vcpu_get_gr(vcpu,17,&lock);
  10.132 +    vcpu_get_gr_nat(vcpu,16,&va);
  10.133 +    vcpu_get_gr_nat(vcpu,17,&lock);
  10.134      ret=do_lock_page(vcpu, va, lock);
  10.135 -    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
  10.136 +    vcpu_set_gr(vcpu, 8, ret, 0);
  10.137  
  10.138      vmx_vcpu_increment_iip(vcpu);
  10.139  }
  10.140 @@ -213,10 +213,10 @@ void hyper_set_shared_page(void)
  10.141  {
  10.142      VCPU *vcpu=current;
  10.143      u64 gpa,ret;
  10.144 -    vmx_vcpu_get_gr(vcpu,16,&gpa);
  10.145 +    vcpu_get_gr_nat(vcpu,16,&gpa);
  10.146  
  10.147      ret=do_set_shared_page(vcpu, gpa);
  10.148 -    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
  10.149 +    vcpu_set_gr(vcpu, 8, ret, 0);
  10.150  
  10.151      vmx_vcpu_increment_iip(vcpu);
  10.152  }
  10.153 @@ -226,11 +226,11 @@ void hyper_grant_table_op(void)
  10.154  {
  10.155      VCPU *vcpu=current;
  10.156      u64 r32,r33,r34,ret;
  10.157 -    vmx_vcpu_get_gr(vcpu,16,&r32);
  10.158 -    vmx_vcpu_get_gr(vcpu,17,&r33);
  10.159 -    vmx_vcpu_get_gr(vcpu,18,&r34);
  10.160 +    vcpu_get_gr_nat(vcpu,16,&r32);
  10.161 +    vcpu_get_gr_nat(vcpu,17,&r33);
  10.162 +    vcpu_get_gr_nat(vcpu,18,&r34);
  10.163  
  10.164      ret=do_grant_table_op((unsigned int)r32, (void *)r33, (unsigned int)r34);
  10.165 -    vmx_vcpu_set_gr(vcpu, 8, ret, 0);
  10.166 +    vcpu_set_gr(vcpu, 8, ret, 0);
  10.167  }
  10.168  */
    11.1 --- a/xen/arch/ia64/vmx/vmx_interrupt.c	Sun Sep 18 12:18:57 2005 -0600
    11.2 +++ b/xen/arch/ia64/vmx/vmx_interrupt.c	Mon Sep 19 11:08:20 2005 -0600
    11.3 @@ -37,11 +37,8 @@ collect_interruption(VCPU *vcpu)
    11.4      IA64_PSR vpsr;
    11.5      REGS * regs = vcpu_regs(vcpu);
    11.6      vpsr.val = vmx_vcpu_get_psr(vcpu);
    11.7 -
    11.8 +    vcpu_bsw0(vcpu);
    11.9      if(vpsr.ic){
   11.10 -	extern void vmx_dorfirfi(void);
   11.11 -	if (regs->cr_iip == *(unsigned long *)vmx_dorfirfi)
   11.12 -		panic("COLLECT interruption for vmx_dorfirfi\n");
   11.13  
   11.14          /* Sync mpsr id/da/dd/ss/ed bits to vipsr
   11.15           * since after guest do rfi, we still want these bits on in
   11.16 @@ -65,7 +62,7 @@ collect_interruption(VCPU *vcpu)
   11.17          vifs &= ~IA64_IFS_V;
   11.18          vcpu_set_ifs(vcpu, vifs);
   11.19  
   11.20 -        vcpu_set_iipa(vcpu, regs->cr_iipa);
   11.21 +        vcpu_set_iipa(vcpu, VMX(vcpu,cr_iipa));
   11.22      }
   11.23  
   11.24      vdcr = VCPU(vcpu,dcr);
   11.25 @@ -88,6 +85,7 @@ collect_interruption(VCPU *vcpu)
   11.26      vmx_vcpu_set_psr(vcpu, vpsr.val);
   11.27  
   11.28  }
   11.29 +
   11.30  int
   11.31  inject_guest_interruption(VCPU *vcpu, u64 vec)
   11.32  {
    12.1 --- a/xen/arch/ia64/vmx/vmx_ivt.S	Sun Sep 18 12:18:57 2005 -0600
    12.2 +++ b/xen/arch/ia64/vmx/vmx_ivt.S	Mon Sep 19 11:08:20 2005 -0600
    12.3 @@ -690,9 +690,14 @@ END(vmx_single_step_trap)
    12.4  /////////////////////////////////////////////////////////////////////////////////////////
    12.5  // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
    12.6  ENTRY(vmx_virtualization_fault)
    12.7 -	VMX_DBG_FAULT(37)
    12.8  	mov r31=pr
    12.9      mov r19=37
   12.10 +    adds r16 = IA64_VCPU_CAUSE_OFFSET,r21
   12.11 +    adds r17 = IA64_VCPU_OPCODE_OFFSET,r21
   12.12 +    ;;
   12.13 +    st8 [r16] = r24
   12.14 +    st8 [r17] = r25
   12.15 +    ;;
   12.16      br.sptk vmx_dispatch_virtualization_fault
   12.17  END(vmx_virtualization_fault)
   12.18  
   12.19 @@ -897,44 +902,45 @@ ENTRY(vmx_dispatch_reflection)
   12.20       *  r31:    contains saved predicates (pr)
   12.21       */
   12.22      VMX_SAVE_MIN_WITH_COVER_R19
   12.23 -    alloc r14=ar.pfs,0,0,4,0
   12.24 +    alloc r14=ar.pfs,0,0,5,0
   12.25      mov out0=cr.ifa
   12.26      mov out1=cr.isr
   12.27      mov out2=cr.iim
   12.28      mov out3=r15
   12.29 -
   12.30 +    adds r3=8,r2                // set up second base pointer
   12.31 +    ;;
   12.32      ssm psr.ic
   12.33      ;;
   12.34      srlz.i                  // guarantee that interruption collection is on
   12.35      ;;
   12.36      (p15) ssm psr.i               // restore psr.i
   12.37 -    adds r3=16,r2                // set up second base pointer
   12.38 +    movl r14=ia64_leave_hypervisor
   12.39      ;;
   12.40      VMX_SAVE_REST
   12.41 -    movl r14=ia64_leave_hypervisor
   12.42 +    mov rp=r14
   12.43      ;;
   12.44 -    mov rp=r14
   12.45 +    adds out4=16,r12
   12.46      br.call.sptk.many b6=vmx_reflect_interruption
   12.47  END(vmx_dispatch_reflection)
   12.48  
   12.49  ENTRY(vmx_dispatch_virtualization_fault)
   12.50      VMX_SAVE_MIN_WITH_COVER_R19
   12.51      ;;
   12.52 -    alloc r14=ar.pfs,0,0,3,0        // now it's safe (must be first in insn group!)
   12.53 +    alloc r14=ar.pfs,0,0,2,0        // now it's safe (must be first in insn group!)
   12.54      mov out0=r13        //vcpu
   12.55 -    mov out1=r4         //cause
   12.56 -    mov out2=r5         //opcode
   12.57 +    adds r3=8,r2                // set up second base pointer
   12.58 +    ;;
   12.59      ssm psr.ic
   12.60      ;;
   12.61      srlz.i                  // guarantee that interruption collection is on
   12.62      ;;
   12.63      (p15) ssm psr.i               // restore psr.i
   12.64 -    adds r3=16,r2                // set up second base pointer
   12.65 +    movl r14=ia64_leave_hypervisor
   12.66      ;;
   12.67      VMX_SAVE_REST
   12.68 -    movl r14=ia64_leave_hypervisor
   12.69 +    mov rp=r14
   12.70      ;;
   12.71 -    mov rp=r14
   12.72 +    adds out1=16,sp         //regs
   12.73      br.call.sptk.many b6=vmx_emulate
   12.74  END(vmx_dispatch_virtualization_fault)
   12.75  
   12.76 @@ -949,7 +955,7 @@ ENTRY(vmx_dispatch_vexirq)
   12.77      srlz.i                  // guarantee that interruption collection is on
   12.78      ;;
   12.79      (p15) ssm psr.i               // restore psr.i
   12.80 -    adds r3=16,r2                // set up second base pointer
   12.81 +    adds r3=8,r2                // set up second base pointer
   12.82      ;;
   12.83      VMX_SAVE_REST
   12.84      movl r14=ia64_leave_hypervisor
   12.85 @@ -961,21 +967,21 @@ END(vmx_dispatch_vexirq)
   12.86  ENTRY(vmx_dispatch_tlb_miss)
   12.87      VMX_SAVE_MIN_WITH_COVER_R19
   12.88      alloc r14=ar.pfs,0,0,3,0
   12.89 -    mov out0=r13
   12.90 +    mov out0=cr.ifa
   12.91      mov out1=r15
   12.92 -    mov out2=cr.ifa
   12.93 -
   12.94 +    adds r3=8,r2                // set up second base pointer
   12.95 +    ;;
   12.96      ssm psr.ic
   12.97      ;;
   12.98      srlz.i                  // guarantee that interruption collection is on
   12.99      ;;
  12.100      (p15) ssm psr.i               // restore psr.i
  12.101 -    adds r3=16,r2                // set up second base pointer
  12.102 +    movl r14=ia64_leave_hypervisor
  12.103      ;;
  12.104      VMX_SAVE_REST
  12.105 -    movl r14=ia64_leave_hypervisor
  12.106 +    mov rp=r14
  12.107      ;;
  12.108 -    mov rp=r14
  12.109 +    adds out2=16,r12
  12.110      br.call.sptk.many b6=vmx_hpw_miss
  12.111  END(vmx_dispatch_tlb_miss)
  12.112  
  12.113 @@ -986,21 +992,21 @@ ENTRY(vmx_dispatch_break_fault)
  12.114      ;;
  12.115      alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
  12.116      mov out0=cr.ifa
  12.117 -    adds out1=16,sp
  12.118      mov out2=cr.isr     // FIXME: pity to make this slow access twice
  12.119      mov out3=cr.iim     // FIXME: pity to make this slow access twice
  12.120 -
  12.121 +    adds r3=8,r2                // set up second base pointer
  12.122 +    ;;
  12.123      ssm psr.ic
  12.124      ;;
  12.125      srlz.i                  // guarantee that interruption collection is on
  12.126      ;;
  12.127      (p15)ssm psr.i               // restore psr.i
  12.128 -    adds r3=16,r2                // set up second base pointer
  12.129 +    movl r14=ia64_leave_hypervisor
  12.130      ;;
  12.131      VMX_SAVE_REST
  12.132 -    movl r14=ia64_leave_hypervisor
  12.133 +    mov rp=r14
  12.134      ;;
  12.135 -    mov rp=r14
  12.136 +    adds out1=16,sp
  12.137      br.call.sptk.many b6=vmx_ia64_handle_break
  12.138      ;;
  12.139  END(vmx_dispatch_break_fault)
  12.140 @@ -1013,7 +1019,7 @@ ENTRY(vmx_hypercall_dispatch)
  12.141      srlz.i                  // guarantee that interruption collection is on
  12.142      ;;
  12.143      (p15) ssm psr.i               // restore psr.i
  12.144 -    adds r3=16,r2                // set up second base pointer
  12.145 +    adds r3=8,r2                // set up second base pointer
  12.146      ;;
  12.147      VMX_SAVE_REST
  12.148      ;;
  12.149 @@ -1038,19 +1044,19 @@ ENTRY(vmx_dispatch_interrupt)
  12.150  	;;
  12.151  	alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
  12.152  	mov out0=cr.ivr		// pass cr.ivr as first arg
  12.153 -	add out1=16,sp		// pass pointer to pt_regs as second arg
  12.154 -
  12.155 +	adds r3=8,r2		// set up second base pointer for SAVE_REST
  12.156 +    ;;
  12.157  	ssm psr.ic
  12.158  	;;
  12.159      srlz.i
  12.160      ;;
  12.161      (p15) ssm psr.i
  12.162 -	adds r3=16,r2		// set up second base pointer for SAVE_REST
  12.163 +	movl r14=ia64_leave_hypervisor
  12.164  	;;
  12.165  	VMX_SAVE_REST
  12.166 -	movl r14=ia64_leave_hypervisor
  12.167 +	mov rp=r14
  12.168  	;;
  12.169 -	mov rp=r14
  12.170 +	add out1=16,sp		// pass pointer to pt_regs as second arg
  12.171  	br.call.sptk.many b6=vmx_ia64_handle_irq
  12.172  END(vmx_dispatch_interrupt)
  12.173  
    13.1 --- a/xen/arch/ia64/vmx/vmx_minstate.h	Sun Sep 18 12:18:57 2005 -0600
    13.2 +++ b/xen/arch/ia64/vmx/vmx_minstate.h	Mon Sep 19 11:08:20 2005 -0600
    13.3 @@ -65,7 +65,6 @@
    13.4      ld8 r25=[r25];      /* read vpd base */     \
    13.5      ld8 r20=[r20];      /* read entry point */  \
    13.6      ;;      \
    13.7 -    mov r6=r25;     \
    13.8      add r20=PAL_VPS_SYNC_READ,r20;  \
    13.9      ;;  \
   13.10  { .mii;  \
   13.11 @@ -80,21 +79,19 @@
   13.12      br.cond.sptk b0;        /*  call the service */ \
   13.13      ;;              \
   13.14  };           \
   13.15 -    ld8 r7=[r22];   \
   13.16 +    ld8 r17=[r22];   \
   13.17      /* deposite ipsr bit cpl into vpd.vpsr, since epc will change */    \
   13.18      extr.u r30=r16, IA64_PSR_CPL0_BIT, 2;   \
   13.19      ;;      \
   13.20 -    dep r7=r30, r7, IA64_PSR_CPL0_BIT, 2;   \
   13.21 -    ;;      \
   13.22 +    dep r17=r30, r17, IA64_PSR_CPL0_BIT, 2;   \
   13.23      extr.u r30=r16, IA64_PSR_BE_BIT, 5;   \
   13.24      ;;      \
   13.25 -    dep r7=r30, r7, IA64_PSR_BE_BIT, 5;   \
   13.26 -    ;;      \
   13.27 +    dep r17=r30, r17, IA64_PSR_BE_BIT, 5;   \
   13.28      extr.u r30=r16, IA64_PSR_RI_BIT, 2;   \
   13.29      ;;      \
   13.30 -    dep r7=r30, r7, IA64_PSR_RI_BIT, 2;   \
   13.31 +    dep r17=r30, r17, IA64_PSR_RI_BIT, 2;   \
   13.32      ;;      \
   13.33 -    st8 [r22]=r7;      \
   13.34 +    st8 [r22]=r17;      \
   13.35      ;;
   13.36  
   13.37  
   13.38 @@ -156,12 +153,14 @@
   13.39      VMX_MINSTATE_GET_CURRENT(r16);  /* M (or M;;I) */                   \
   13.40      mov r27=ar.rsc;         /* M */                         \
   13.41      mov r20=r1;         /* A */                         \
   13.42 -    mov r26=ar.unat;        /* M */                         \
   13.43 +    mov r25=ar.unat;        /* M */                         \
   13.44      mov r29=cr.ipsr;        /* M */                         \
   13.45 +    mov r26=ar.pfs;         /* I */                     \
   13.46      mov r18=cr.isr;         \
   13.47      COVER;              /* B;; (or nothing) */                  \
   13.48      ;;                                          \
   13.49      tbit.z p6,p0=r29,IA64_PSR_VM_BIT;       \
   13.50 +    ;;      \
   13.51      tbit.nz.or p6,p0 = r18,39; \
   13.52      ;;        \
   13.53  (p6) br.sptk.few vmx_panic;        \
   13.54 @@ -193,7 +192,6 @@
   13.55  .mem.offset 0,0; st8.spill [r16]=r10,24;                            \
   13.56  .mem.offset 8,0; st8.spill [r17]=r11,24;                            \
   13.57          ;;                                          \
   13.58 -    mov r8=ar.pfs;         /* I */                         \
   13.59      mov r9=cr.iip;         /* M */                         \
   13.60      mov r10=ar.fpsr;        /* M */                         \
   13.61          ;;                      \
   13.62 @@ -201,8 +199,8 @@
   13.63      st8 [r17]=r30,16;   /* save cr.ifs */                       \
   13.64      sub r18=r18,r22;    /* r18=RSE.ndirty*8 */                      \
   13.65      ;;          \
   13.66 -    st8 [r16]=r26,16;   /* save ar.unat */                      \
   13.67 -    st8 [r17]=r8,16;    /* save ar.pfs */                       \
   13.68 +    st8 [r16]=r25,16;   /* save ar.unat */                      \
   13.69 +    st8 [r17]=r26,16;    /* save ar.pfs */                       \
   13.70      shl r18=r18,16;     /* compute ar.rsc to be used for "loadrs" */            \
   13.71      ;;                                          \
   13.72      st8 [r16]=r27,16;   /* save ar.rsc */                       \
   13.73 @@ -227,32 +225,18 @@
   13.74      ;;                                          \
   13.75  .mem.offset 0,0; st8.spill [r16]=r2,16;                             \
   13.76  .mem.offset 8,0; st8.spill [r17]=r3,16;                             \
   13.77 -    adds r2=PT(F6),r1;                         \
   13.78 -    ;;                                          \
   13.79 - .mem.offset 0,0; st8.spill [r16]=r4,16;                             \
   13.80 - .mem.offset 8,0; st8.spill [r17]=r5,16;                             \
   13.81 -    ;;          \
   13.82 - .mem.offset 0,0; st8.spill [r16]=r6,16;     \
   13.83 - .mem.offset 8,0; st8.spill [r17]=r7,16;     \
   13.84 -    mov r20=ar.ccv;      \
   13.85 -    ;;  \
   13.86 -  mov r18=cr.iipa;  \
   13.87 -  mov r4=cr.isr;   \
   13.88 -  mov r22=ar.unat;    \
   13.89 +    adds r2=IA64_PT_REGS_R16_OFFSET,r1;                         \
   13.90 +     ;;  \
   13.91 +    adds r16=IA64_VCPU_IIPA_OFFSET,r13;                       \
   13.92 +    adds r17=IA64_VCPU_ISR_OFFSET,r13;                       \
   13.93 +    mov r26=cr.iipa;  \
   13.94 +    mov r27=cr.isr;   \
   13.95 +    ;;      \
   13.96 +    st8 [r16]=r26;      \
   13.97 +    st8 [r17]=r27;      \
   13.98      ;;  \
   13.99 -  st8 [r16]=r18,16;      \
  13.100 -  st8 [r17]=r4;      \
  13.101 -    ;;      \
  13.102 -    adds r16=PT(EML_UNAT),r1;   \
  13.103 -    adds r17=PT(AR_CCV),r1;                 \
  13.104 -    ;;                      \
  13.105 -    st8 [r16]=r22,8;     \
  13.106 -    st8 [r17]=r20;       \
  13.107 -    mov r4=r24;         \
  13.108 -    mov r5=r25;         \
  13.109 -     ;;  \
  13.110 -    st8 [r16]=r0;  \
  13.111      EXTRA;                                          \
  13.112 +    mov r8=ar.ccv;          \
  13.113      mov r9=ar.csd;                                      \
  13.114      mov r10=ar.ssd;                                     \
  13.115      movl r11=FPSR_DEFAULT;   /* L-unit */                           \
  13.116 @@ -268,9 +252,7 @@
  13.117   *  psr.ic: on
  13.118   *  r2: points to &pt_regs.f6
  13.119   *  r3: points to &pt_regs.f7
  13.120 - *  r4,r5,scrach
  13.121 - *  r6: points to vpd
  13.122 - *  r7: vpsr
  13.123 + *  r8: contents of ar.ccv
  13.124   *  r9: contents of ar.csd
  13.125   *  r10:    contents of ar.ssd
  13.126   *  r11:    FPSR_DEFAULT
  13.127 @@ -278,46 +260,35 @@
  13.128   * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
  13.129   */
  13.130  #define VMX_SAVE_REST               \
  13.131 -    tbit.z pBN0,pBN1=r7,IA64_PSR_BN_BIT;  /* guest bank0 or bank1 ? */      \
  13.132 -    ;;      \
  13.133 -(pBN0) add r4=VPD(VBGR),r6;     \
  13.134 -(pBN0) add r5=VPD(VBGR)+0x8,r6;     \
  13.135 -(pBN0) add r7=VPD(VBNAT),r6;     \
  13.136 -    ;;      \
  13.137 -(pBN1) add r5=VPD(VGR)+0x8,r6;      \
  13.138 -(pBN1) add r4=VPD(VGR),r6;      \
  13.139 -(pBN1) add r7=VPD(VNAT),r6;      \
  13.140 -    ;;      \
  13.141 -.mem.offset 0,0; st8.spill [r4]=r16,16;     \
  13.142 -.mem.offset 8,0; st8.spill [r5]=r17,16;     \
  13.143 -    ;;                  \
  13.144 -.mem.offset 0,0; st8.spill [r4]=r18,16;     \
  13.145 -.mem.offset 8,0; st8.spill [r5]=r19,16;     \
  13.146 -    ;;                  \
  13.147 -.mem.offset 0,0; st8.spill [r4]=r20,16;     \
  13.148 -.mem.offset 8,0; st8.spill [r5]=r21,16;     \
  13.149 +.mem.offset 0,0; st8.spill [r2]=r16,16;     \
  13.150 +.mem.offset 8,0; st8.spill [r3]=r17,16;     \
  13.151      ;;                  \
  13.152 -.mem.offset 0,0; st8.spill [r4]=r22,16;     \
  13.153 -.mem.offset 8,0; st8.spill [r5]=r23,16;     \
  13.154 -    ;;                  \
  13.155 -.mem.offset 0,0; st8.spill [r4]=r24,16;     \
  13.156 -.mem.offset 8,0; st8.spill [r5]=r25,16;     \
  13.157 -    ;;                  \
  13.158 -.mem.offset 0,0; st8.spill [r4]=r26,16;     \
  13.159 -.mem.offset 8,0; st8.spill [r5]=r27,16;     \
  13.160 +.mem.offset 0,0; st8.spill [r2]=r18,16;     \
  13.161 +.mem.offset 8,0; st8.spill [r3]=r19,16;     \
  13.162      ;;                  \
  13.163 -.mem.offset 0,0; st8.spill [r4]=r28,16;     \
  13.164 -.mem.offset 8,0; st8.spill [r5]=r29,16;     \
  13.165 -    mov r26=b6;         \
  13.166 +.mem.offset 0,0; st8.spill [r2]=r20,16;     \
  13.167 +.mem.offset 8,0; st8.spill [r3]=r21,16;     \
  13.168 +    mov r18=b6;         \
  13.169      ;;                  \
  13.170 -.mem.offset 0,0; st8.spill [r4]=r30,16;     \
  13.171 -.mem.offset 8,0; st8.spill [r5]=r31,16;     \
  13.172 -    mov r27=b7;     \
  13.173 +.mem.offset 0,0; st8.spill [r2]=r22,16;     \
  13.174 +.mem.offset 8,0; st8.spill [r3]=r23,16;     \
  13.175 +    mov r19=b7;     \
  13.176      ;;                  \
  13.177 -    mov r30=ar.unat;    \
  13.178 -    ;;      \
  13.179 -    st8 [r7]=r30;       \
  13.180 -    mov ar.fpsr=r11;    /* M-unit */    \
  13.181 +.mem.offset 0,0; st8.spill [r2]=r24,16;     \
  13.182 +.mem.offset 8,0; st8.spill [r3]=r25,16;     \
  13.183 +    ;;                  \
  13.184 +.mem.offset 0,0; st8.spill [r2]=r26,16;     \
  13.185 +.mem.offset 8,0; st8.spill [r3]=r27,16;     \
  13.186 +    ;;                  \
  13.187 +.mem.offset 0,0; st8.spill [r2]=r28,16;     \
  13.188 +.mem.offset 8,0; st8.spill [r3]=r29,16;     \
  13.189 +    ;;                  \
  13.190 +.mem.offset 0,0; st8.spill [r2]=r30,16;     \
  13.191 +.mem.offset 8,0; st8.spill [r3]=r31,32;     \
  13.192 +    ;;                  \
  13.193 +    mov ar.fpsr=r11;     \
  13.194 +    st8 [r2]=r8,8;       \
  13.195 +    adds r24=PT(B6)-PT(F7),r3;      \
  13.196      ;;                  \
  13.197      stf.spill [r2]=f6,32;           \
  13.198      stf.spill [r3]=f7,32;           \
  13.199 @@ -325,17 +296,24 @@
  13.200      stf.spill [r2]=f8,32;           \
  13.201      stf.spill [r3]=f9,32;           \
  13.202      ;;                  \
  13.203 -    stf.spill [r2]=f10;         \
  13.204 -    stf.spill [r3]=f11;         \
  13.205 +    stf.spill [r2]=f10,32;         \
  13.206 +    stf.spill [r3]=f11,24;         \
  13.207      ;;                  \
  13.208 -    adds r2=PT(B6)-PT(F10),r2;      \
  13.209 -    adds r3=PT(B7)-PT(F11),r3;      \
  13.210 -    ;;          \
  13.211 -    st8 [r2]=r26,16;       /* b6 */    \
  13.212 -    st8 [r3]=r27,16;       /* b7 */    \
  13.213 +.mem.offset 0,0; st8.spill [r2]=r4,16;     \
  13.214 +.mem.offset 8,0; st8.spill [r3]=r5,16;     \
  13.215      ;;                  \
  13.216 -    st8 [r2]=r9;           /* ar.csd */    \
  13.217 -    st8 [r3]=r10;          /* ar.ssd */    \
  13.218 +.mem.offset 0,0; st8.spill [r2]=r6,16;      \
  13.219 +.mem.offset 8,0; st8.spill [r3]=r7;      \
  13.220 +    adds r25=PT(B7)-PT(R7),r3;     \
  13.221 +    ;;                  \
  13.222 +    st8 [r24]=r18,16;       /* b6 */    \
  13.223 +    st8 [r25]=r19,16;       /* b7 */    \
  13.224 +    ;;                  \
  13.225 +    st8 [r24]=r9;           /* ar.csd */    \
  13.226 +    mov r26=ar.unat;            \
  13.227 +    ;;      \
  13.228 +    st8 [r25]=r10;          /* ar.ssd */    \
  13.229 +    st8 [r2]=r26;       /* eml_unat */ \
  13.230      ;;
  13.231  
  13.232  #define VMX_SAVE_MIN_WITH_COVER   VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,)
    14.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Sun Sep 18 12:18:57 2005 -0600
    14.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Mon Sep 19 11:08:20 2005 -0600
    14.3 @@ -72,8 +72,8 @@ vmx_ia64_handle_break (unsigned long ifa
    14.4  		first_time = 0;
    14.5  	}
    14.6  	if (iim == 0x80001 || iim == 0x80002) {	//FIXME: don't hardcode constant
    14.7 -		if (running_on_sim) do_ssc(vcpu_get_gr(current,36), regs);
    14.8 -		else do_ssc(vcpu_get_gr(current,36), regs);
    14.9 +		if (running_on_sim) do_ssc(vcpu_get_gr_nat(current,36), regs);
   14.10 +		else do_ssc(vcpu_get_gr_nat(current,36), regs);
   14.11  	}
   14.12  #endif
   14.13  	if (iim == d->arch.breakimm) {
   14.14 @@ -93,7 +93,7 @@ vmx_ia64_handle_break (unsigned long ifa
   14.15  			break;
   14.16  		    case FW_HYPERCALL_SAL_CALL:
   14.17  			for (i = 0; i < 8; i++)
   14.18 -				vmx_vcpu_get_gr(v, 32+i, &sal_param[i]);
   14.19 +				vcpu_get_gr_nat(v, 32+i, &sal_param[i]);
   14.20  			x = sal_emulator(sal_param[0], sal_param[1],
   14.21  					 sal_param[2], sal_param[3],
   14.22  					 sal_param[4], sal_param[5],
   14.23 @@ -118,8 +118,8 @@ vmx_ia64_handle_break (unsigned long ifa
   14.24  		    case FW_HYPERCALL_EFI_GET_TIME:
   14.25  			{
   14.26  			unsigned long *tv, *tc;
   14.27 -			vmx_vcpu_get_gr(v, 32, &tv);
   14.28 -			vmx_vcpu_get_gr(v, 33, &tc);
   14.29 +			vcpu_get_gr_nat(v, 32, &tv);
   14.30 +			vcpu_get_gr_nat(v, 33, &tc);
   14.31  			printf("efi_get_time(%p,%p) called...",tv,tc);
   14.32  			tv = __va(translate_domain_mpaddr(tv));
   14.33  			if (tc) tc = __va(translate_domain_mpaddr(tc));
   14.34 @@ -154,7 +154,7 @@ vmx_ia64_handle_break (unsigned long ifa
   14.35          pal_emul(current);
   14.36  		vmx_vcpu_increment_iip(current);
   14.37      }  else
   14.38 -		vmx_reflect_interruption(ifa,isr,iim,11);
   14.39 +		vmx_reflect_interruption(ifa,isr,iim,11,regs);
   14.40  }
   14.41  
   14.42  static UINT64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000, 0x1400,0x1800,
   14.43 @@ -170,10 +170,9 @@ static UINT64 vec2off[68] = {0x0,0x400,0
   14.44  
   14.45  
   14.46  void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
   14.47 -     UINT64 vector)
   14.48 +     UINT64 vector,REGS *regs)
   14.49  {
   14.50      VCPU *vcpu = current;
   14.51 -    REGS *regs=vcpu_regs(vcpu);
   14.52      UINT64 viha,vpsr = vmx_vcpu_get_psr(vcpu);
   14.53      if(!(vpsr&IA64_PSR_IC)&&(vector!=5)){
   14.54          panic("Guest nested fault!");
   14.55 @@ -189,6 +188,36 @@ void vmx_reflect_interruption(UINT64 ifa
   14.56      inject_guest_interruption(vcpu, vector);
   14.57  }
   14.58  
   14.59 +
   14.60 +void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
   14.61 +{
   14.62 +    unsigned long i, *src, *dst, *sunat, *dunat;
   14.63 +    IA64_PSR vpsr;
   14.64 +    src=&regs->r16;
   14.65 +    sunat=&regs->eml_unat;
   14.66 +    vpsr.val = vmx_vcpu_get_psr(v);
   14.67 +    if(vpsr.bn){
   14.68 +        dst = &VCPU(v, vgr[0]);
   14.69 +        dunat =&VCPU(v, vnat);
   14.70 +        __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;;\n"
   14.71 +                            "dep %2 = %0, %2, 0, 16;;\n"
   14.72 +                            "st8 [%3] = %2;;"
   14.73 +       ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
   14.74 +
   14.75 +    }else{
   14.76 +        dst = &VCPU(v, vbgr[0]);
   14.77 +//        dunat =&VCPU(v, vbnat);
   14.78 +//        __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;;\n"
   14.79 +//                            "dep %2 = %0, %2, 16, 16;;\n"
   14.80 +//                            "st8 [%3] = %2;;"
   14.81 +//       ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
   14.82 +
   14.83 +    }
   14.84 +    for(i=0; i<16; i++)
   14.85 +        *dst++ = *src++;
   14.86 +}
   14.87 +
   14.88 +
   14.89  // ONLY gets called from ia64_leave_kernel
   14.90  // ONLY call with interrupts disabled?? (else might miss one?)
   14.91  // NEVER successful if already reflecting a trap/fault because psr.i==0
   14.92 @@ -200,7 +229,6 @@ void leave_hypervisor_tail(struct pt_reg
   14.93  	if (!is_idle_task(d) ) {	// always comes from guest
   14.94  	        extern void vmx_dorfirfi(void);
   14.95  		struct pt_regs *user_regs = vcpu_regs(current);
   14.96 -
   14.97   		if (local_softirq_pending())
   14.98   			do_softirq();
   14.99  		local_irq_disable();
  14.100 @@ -224,18 +252,22 @@ void leave_hypervisor_tail(struct pt_reg
  14.101   			VCPU(v, irr[0]) |= 1UL << 0x10;
  14.102   			v->arch.irq_new_pending = 1;
  14.103   		}
  14.104 - 
  14.105 +
  14.106   		if ( v->arch.irq_new_pending ) {
  14.107   			v->arch.irq_new_pending = 0;
  14.108   			vmx_check_pending_irq(v);
  14.109   		}
  14.110 +//        if (VCPU(v,vac).a_bsw){
  14.111 +//            save_banked_regs_to_vpd(v,regs);
  14.112 +//        }
  14.113 +
  14.114  	}
  14.115  }
  14.116  
  14.117  extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
  14.118  
  14.119  /* We came here because the H/W VHPT walker failed to find an entry */
  14.120 -void vmx_hpw_miss(VCPU *vcpu, u64 vec, u64 vadr)
  14.121 +void vmx_hpw_miss(u64 vadr, u64 vec, REGS *regs)
  14.122  {
  14.123      IA64_PSR vpsr;
  14.124      CACHE_LINE_TYPE type;
  14.125 @@ -245,16 +277,16 @@ void vmx_hpw_miss(VCPU *vcpu, u64 vec, u
  14.126 -    REGS *regs;
  14.127      thash_cb_t *vtlb, *vhpt;
  14.128      thash_data_t *data, me;
  14.129 -    vtlb=vmx_vcpu_get_vtlb(vcpu);
  14.130 +    VCPU *v = current;
  14.131 +    vtlb=vmx_vcpu_get_vtlb(v);
  14.132  #ifdef  VTLB_DEBUG
  14.133      check_vtlb_sanity(vtlb);
  14.134      dump_vtlb(vtlb);
  14.135  #endif
  14.136 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
  14.137 -    regs = vcpu_regs(vcpu);
  14.138 -    misr.val=regs->cr_isr;
  14.139 +    vpsr.val = vmx_vcpu_get_psr(v);
  14.140 +    misr.val=VMX(v,cr_isr);
  14.141 +
  14.142  /*  TODO
  14.143 -    if(vcpu->domain->id && vec == 2 &&
  14.144 +    if(v->domain->id && vec == 2 &&
  14.145         vpsr.dt == 0 && is_gpa_io(MASK_PMA(vaddr))){
  14.146          emulate_ins(&v);
  14.147          return;
  14.148 @@ -262,110 +295,110 @@ void vmx_hpw_miss(VCPU *vcpu, u64 vec, u
  14.149  */
  14.150  
  14.151      if((vec==1)&&(!vpsr.it)){
  14.152 -        physical_itlb_miss(vcpu, vadr);
  14.153 +        physical_itlb_miss(v, vadr);
  14.154          return;
  14.155      }
  14.156      if((vec==2)&&(!vpsr.dt)){
  14.157 -        if(vcpu->domain!=dom0&&__gpfn_is_io(vcpu->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
  14.158 -            emulate_io_inst(vcpu,((vadr<<1)>>1),4);   //  UC
  14.159 +        if(v->domain!=dom0&&__gpfn_is_io(v->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
  14.160 +            emulate_io_inst(v,((vadr<<1)>>1),4);   //  UC
  14.161          }else{
  14.162 -            physical_dtlb_miss(vcpu, vadr);
  14.163 +            physical_dtlb_miss(v, vadr);
  14.164          }
  14.165          return;
  14.166      }
  14.167 -    vrr = vmx_vcpu_rr(vcpu,vadr);
  14.168 +    vrr = vmx_vcpu_rr(v, vadr);
  14.169      if(vec == 1) type = ISIDE_TLB;
  14.170      else if(vec == 2) type = DSIDE_TLB;
  14.171      else panic("wrong vec\n");
  14.172  
  14.173 -//    prepare_if_physical_mode(vcpu);
  14.174 +//    prepare_if_physical_mode(v);
  14.175  
  14.176      if(data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type)){
  14.177 -        if(vcpu->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(vcpu->domain, data->ppn>>(PAGE_SHIFT-12))){
  14.178 +        if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain, data->ppn>>(PAGE_SHIFT-12))){
  14.179              vadr=(vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
  14.180 -            emulate_io_inst(vcpu, vadr, data->ma);
  14.181 +            emulate_io_inst(v, vadr, data->ma);
  14.182              return IA64_FAULT;
  14.183          }
  14.184      	if ( data->ps != vrr.ps ) {
  14.185 -    		machine_tlb_insert(vcpu, data);
  14.186 +    		machine_tlb_insert(v, data);
  14.187      	}
  14.188      	else {
  14.189  	        thash_insert(vtlb->ts->vhpt,data,vadr);
  14.190  	    }
  14.191      }else if(type == DSIDE_TLB){
  14.192 -        if(!vhpt_enabled(vcpu, vadr, misr.rs?RSE_REF:DATA_REF)){
  14.193 +        if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
  14.194              if(vpsr.ic){
  14.195 -                vcpu_set_isr(vcpu, misr.val);
  14.196 -                alt_dtlb(vcpu, vadr);
  14.197 +                vcpu_set_isr(v, misr.val);
  14.198 +                alt_dtlb(v, vadr);
  14.199                  return IA64_FAULT;
  14.200              } else{
  14.201                  if(misr.sp){
  14.202                      //TODO  lds emulation
  14.203                      panic("Don't support speculation load");
  14.204                  }else{
  14.205 -                    nested_dtlb(vcpu);
  14.206 +                    nested_dtlb(v);
  14.207                      return IA64_FAULT;
  14.208                  }
  14.209              }
  14.210          } else{
  14.211 -            vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
  14.212 -            vrr=vmx_vcpu_rr(vcpu,vhpt_adr);
  14.213 +            vmx_vcpu_thash(v, vadr, &vhpt_adr);
  14.214 +            vrr=vmx_vcpu_rr(v,vhpt_adr);
  14.215              data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
  14.216              if(data){
  14.217                  if(vpsr.ic){
  14.218 -                    vcpu_set_isr(vcpu, misr.val);
  14.219 -                    dtlb_fault(vcpu, vadr);
  14.220 +                    vcpu_set_isr(v, misr.val);
  14.221 +                    dtlb_fault(v, vadr);
  14.222                      return IA64_FAULT;
  14.223                  }else{
  14.224                      if(misr.sp){
  14.225                          //TODO  lds emulation
  14.226                          panic("Don't support speculation load");
  14.227                      }else{
  14.228 -                        nested_dtlb(vcpu);
  14.229 +                        nested_dtlb(v);
  14.230                          return IA64_FAULT;
  14.231                      }
  14.232                  }
  14.233              }else{
  14.234                  if(vpsr.ic){
  14.235 -                    vcpu_set_isr(vcpu, misr.val);
  14.236 -                    dvhpt_fault(vcpu, vadr);
  14.237 +                    vcpu_set_isr(v, misr.val);
  14.238 +                    dvhpt_fault(v, vadr);
  14.239                      return IA64_FAULT;
  14.240                  }else{
  14.241                      if(misr.sp){
  14.242                          //TODO  lds emulation
  14.243                          panic("Don't support speculation load");
  14.244                      }else{
  14.245 -                        nested_dtlb(vcpu);
  14.246 +                        nested_dtlb(v);
  14.247                          return IA64_FAULT;
  14.248                      }
  14.249                  }
  14.250              }
  14.251          }
  14.252      }else if(type == ISIDE_TLB){
  14.253 -        if(!vhpt_enabled(vcpu, vadr, misr.rs?RSE_REF:DATA_REF)){
  14.254 +        if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
  14.255              if(!vpsr.ic){
  14.256                  misr.ni=1;
  14.257              }
  14.258 -            vcpu_set_isr(vcpu, misr.val);
  14.259 -            alt_itlb(vcpu, vadr);
  14.260 +            vcpu_set_isr(v, misr.val);
  14.261 +            alt_itlb(v, vadr);
  14.262              return IA64_FAULT;
  14.263          } else{
  14.264 -            vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
  14.265 -            vrr=vmx_vcpu_rr(vcpu,vhpt_adr);
  14.266 +            vmx_vcpu_thash(v, vadr, &vhpt_adr);
  14.267 +            vrr=vmx_vcpu_rr(v,vhpt_adr);
  14.268              data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
  14.269              if(data){
  14.270                  if(!vpsr.ic){
  14.271                      misr.ni=1;
  14.272                  }
  14.273 -                vcpu_set_isr(vcpu, misr.val);
  14.274 -                itlb_fault(vcpu, vadr);
  14.275 +                vcpu_set_isr(v, misr.val);
  14.276 +                itlb_fault(v, vadr);
  14.277                  return IA64_FAULT;
  14.278              }else{
  14.279                  if(!vpsr.ic){
  14.280                      misr.ni=1;
  14.281                  }
  14.282 -                vcpu_set_isr(vcpu, misr.val);
  14.283 -                ivhpt_fault(vcpu, vadr);
  14.284 +                vcpu_set_isr(v, misr.val);
  14.285 +                ivhpt_fault(v, vadr);
  14.286                  return IA64_FAULT;
  14.287              }
  14.288          }
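
save_banked_regs_to_vpd() copies the active bank's r16-r31 from pt_regs
back into the VPD (vgr[] when vpsr.bn is set, vbgr[] otherwise) and
folds the matching 16 NaT bits out of eml_unat.  A C sketch of the
operation (copy_bank is a made-up name; it assumes r16..r31 are laid
out contiguously in pt_regs, which the arch-ia64.h change below
guarantees):

    static void copy_bank(unsigned long *dst, unsigned long *dst_nat,
                          struct pt_regs *regs)
    {
        int i;
        /* the 16 NaT bits of r16-r31 sit in eml_unat at r16's unat slot */
        unsigned long nats =
            (regs->eml_unat >> IA64_PT_REGS_R16_SLOT) & 0xffff;
        *dst_nat = (*dst_nat & ~0xffffUL) | nats;
        for (i = 0; i < 16; i++)
            dst[i] = (&regs->r16)[i];
    }

Note that the vbgr[] side of the NaT handling is still commented out,
and so is the vac.a_bsw call site in leave_hypervisor_tail().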
    15.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Sun Sep 18 12:18:57 2005 -0600
    15.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Mon Sep 19 11:08:20 2005 -0600
    15.3 @@ -308,6 +308,7 @@ IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
    15.4      UINT64 ifs, psr;
    15.5      REGS *regs = vcpu_regs(vcpu);
    15.6      psr = VCPU(vcpu,ipsr);
    15.7 +    vcpu_bsw1(vcpu);
    15.8      vmx_vcpu_set_psr(vcpu,psr);
    15.9      ifs=VCPU(vcpu,ifs);
   15.10      if((ifs>>63)&&(ifs<<1)){
   15.11 @@ -326,7 +327,7 @@ vmx_vcpu_get_psr(VCPU *vcpu)
   15.12      return VCPU(vcpu,vpsr);
   15.13  }
   15.14  
   15.15 -
   15.16 +#if 0
   15.17  IA64FAULT
   15.18  vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val)
   15.19  {
   15.20 @@ -378,8 +379,8 @@ vmx_vcpu_set_bgr(VCPU *vcpu, unsigned in
   15.21      return IA64_NO_FAULT;
   15.22  }
   15.23  
   15.24 -
   15.25 -
   15.26 +#endif
   15.27 +#if 0
   15.28  IA64FAULT
   15.29  vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val)
   15.30  {
   15.31 @@ -387,9 +388,11 @@ vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg
   15.32      int nat;
   15.33      //TODO, Eddie
   15.34      if (!regs) return 0;
   15.35 +#if 0
   15.36      if (reg >= 16 && reg < 32) {
   15.37          return vmx_vcpu_get_bgr(vcpu,reg,val);
   15.38      }
   15.39 +#endif
   15.40      getreg(reg,val,&nat,regs);    // FIXME: handle NATs later
   15.41      if(nat){
   15.42          return IA64_FAULT;
   15.43 @@ -410,13 +413,16 @@ vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg
   15.44  
   15.45      if (!regs) return IA64_ILLOP_FAULT;
   15.46      if (reg >= sof + 32) return IA64_ILLOP_FAULT;
   15.47 +#if 0
   15.48      if ( reg >= 16 && reg < 32 ) {
   15.49          return vmx_vcpu_set_bgr(vcpu,reg, value, nat);
   15.50      }
   15.51 +#endif
   15.52      setreg(reg,value,nat,regs);
   15.53      return IA64_NO_FAULT;
   15.54  }
   15.55  
   15.56 +#endif
   15.57  
   15.58  IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
   15.59  {
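
vmx_vcpu_rfi() now calls vcpu_bsw1() before restoring the guest PSR, and
the old banked-register accessors go under #if 0: with the active bank
always materialized in pt_regs (the inactive one is parked in the
VPD/PSCB by vcpu_bsw0/vcpu_bsw1), reading r16-r31 is no different from
reading any other register, so the special case collapses into the
merged vcpu_get_gr_nat()/vcpu_set_gr().  What the merged read amounts to
(a sketch, using getreg() as in vcpu.c):

    UINT64 val;
    int nat;
    REGS *regs = vcpu_regs(vcpu);
    getreg(reg, &val, &nat, regs);  /* r16-r31 read the active bank */

The bsw1 in the rfi path keeps that invariant for the common case of a
guest returning to bank 1; vmx_vcpu_set_psr() presumably re-switches if
the saved ipsr had bn clear.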
    16.1 --- a/xen/arch/ia64/vmx/vmx_virt.c	Sun Sep 18 12:18:57 2005 -0600
    16.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c	Mon Sep 19 11:08:20 2005 -0600
    16.3 @@ -161,13 +161,13 @@ IA64FAULT vmx_emul_mov_from_psr(VCPU *vc
    16.4  
    16.5  /*
    16.6      if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
    16.7 -        return vmx_vcpu_set_gr(vcpu, tgt, val);
    16.8 +        return vcpu_set_gr(vcpu, tgt, val);
    16.9      else return fault;
   16.10      */
   16.11      val = vmx_vcpu_get_psr(vcpu);
   16.12      val = (val & MASK(0, 32)) | (val & MASK(35, 2));
   16.13      last_guest_psr = val;
   16.14 -    return vmx_vcpu_set_gr(vcpu, tgt, val, 0);
   16.15 +    return vcpu_set_gr(vcpu, tgt, val, 0);
   16.16  }
   16.17  
   16.18  /**
   16.19 @@ -177,7 +177,7 @@ IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu
   16.20  {
   16.21      UINT64 val;
   16.22      IA64FAULT fault;
   16.23 -    if(vmx_vcpu_get_gr(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
   16.24 +    if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
   16.25  	panic(" get_psr nat bit fault\n");
   16.26  
   16.27  	val = (val & MASK(0, 32)) | (VCPU(vcpu, vpsr) & MASK(32, 32));
   16.28 @@ -229,7 +229,7 @@ IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST
   16.29          return IA64_FAULT;
   16.30      }
   16.31  #endif // CHECK_FAULT
   16.32 -   return vmx_vcpu_bsw0(vcpu);
   16.33 +   return vcpu_bsw0(vcpu);
   16.34  }
   16.35  
   16.36  IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
   16.37 @@ -244,7 +244,7 @@ IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST
   16.38          return IA64_FAULT;
   16.39      }
   16.40  #endif // CHECK_FAULT
   16.41 -    return vmx_vcpu_bsw1(vcpu);
   16.42 +    return vcpu_bsw1(vcpu);
   16.43  }
   16.44  
   16.45  IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
   16.46 @@ -265,7 +265,7 @@ IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INS
   16.47          privilege_op (vcpu);
   16.48          return IA64_FAULT;
   16.49      }
   16.50 -    if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&r2)){
   16.51 +    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
   16.52  #ifdef  VMAL_NO_FAULT_CHECK
   16.53          set_isr_reg_nat_consumption(vcpu,0,0);
   16.54          rnat_comsumption(vcpu);
   16.55 @@ -299,7 +299,7 @@ IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INS
   16.56          return IA64_FAULT;
   16.57      }
   16.58  #endif // VMAL_NO_FAULT_CHECK
   16.59 -    if(vmx_vcpu_get_gr(vcpu,inst.M47.r3,&r3)){
   16.60 +    if(vcpu_get_gr_nat(vcpu,inst.M47.r3,&r3)){
   16.61  #ifdef  VMAL_NO_FAULT_CHECK
   16.62          set_isr_reg_nat_consumption(vcpu,0,0);
   16.63          rnat_comsumption(vcpu);
   16.64 @@ -334,8 +334,8 @@ IA64FAULT ptr_fault_check(VCPU *vcpu, IN
   16.65          return IA64_FAULT;
   16.66      }
   16.67  #endif // VMAL_NO_FAULT_CHECK
   16.68 -    ret1 = vmx_vcpu_get_gr(vcpu,inst.M45.r3,pr3);
   16.69 -    ret2 = vmx_vcpu_get_gr(vcpu,inst.M45.r2,pr2);
   16.70 +    ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r3,pr3);
   16.71 +    ret2 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pr2);
   16.72  #ifdef  VMAL_NO_FAULT_CHECK
   16.73      if ( ret1 != IA64_NO_FAULT || ret2 != IA64_NO_FAULT ) {
   16.74          set_isr_reg_nat_consumption(vcpu,0,0);
   16.75 @@ -382,20 +382,20 @@ IA64FAULT vmx_emul_thash(VCPU *vcpu, INS
   16.76          return IA64_FAULT;
   16.77      }
   16.78  #endif //CHECK_FAULT
   16.79 -    if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
   16.80 +    if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
   16.81  #ifdef  CHECK_FAULT
   16.82 -        vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
   16.83 +        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
   16.84          return IA64_NO_FAULT;
   16.85  #endif  //CHECK_FAULT
   16.86      }
   16.87  #ifdef  CHECK_FAULT
   16.88      if(unimplemented_gva(vcpu, r3)){
   16.89 -        vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
   16.90 +        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
   16.91          return IA64_NO_FAULT;
   16.92      }
   16.93  #endif  //CHECK_FAULT
   16.94      vmx_vcpu_thash(vcpu, r3, &r1);
   16.95 -    vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
   16.96 +    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
   16.97      return(IA64_NO_FAULT);
   16.98  }
   16.99  
  16.100 @@ -412,20 +412,20 @@ IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST
  16.101          return IA64_FAULT;
  16.102      }
  16.103  #endif //CHECK_FAULT
  16.104 -    if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
  16.105 +    if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
  16.106  #ifdef  CHECK_FAULT
  16.107 -        vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
  16.108 +        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
  16.109          return IA64_NO_FAULT;
  16.110  #endif  //CHECK_FAULT
  16.111      }
  16.112  #ifdef  CHECK_FAULT
  16.113      if(unimplemented_gva(vcpu, r3)){
  16.114 -        vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
  16.115 +        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
  16.116          return IA64_NO_FAULT;
  16.117      }
  16.118  #endif  //CHECK_FAULT
  16.119      vmx_vcpu_ttag(vcpu, r3, &r1);
  16.120 -    vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  16.121 +    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  16.122      return(IA64_NO_FAULT);
  16.123  }
  16.124  
  16.125 @@ -448,7 +448,7 @@ IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST6
  16.126          return IA64_FAULT;
  16.127      }
  16.128  #endif  //CHECK_FAULT
  16.129 -    if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
  16.130 +    if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
  16.131  #ifdef  CHECK_FAULT
  16.132          set_isr_reg_nat_consumption(vcpu,0,1);
  16.133          rnat_comsumption(vcpu);
  16.134 @@ -470,7 +470,7 @@ IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST6
  16.135      if(vmx_vcpu_tpa(vcpu, r3, &r1)){
  16.136          return IA64_FAULT;
  16.137      }
  16.138 -    vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  16.139 +    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  16.140      return(IA64_NO_FAULT);
  16.141  }
  16.142  
  16.143 @@ -493,7 +493,7 @@ IA64FAULT vmx_emul_tak(VCPU *vcpu, INST6
  16.144          return IA64_FAULT;
  16.145      }
  16.146  #endif
  16.147 -    if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
  16.148 +    if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
  16.149  #ifdef  CHECK_FAULT
  16.150          set_isr_reg_nat_consumption(vcpu,0,1);
  16.151          rnat_comsumption(vcpu);
  16.152 @@ -503,7 +503,7 @@ IA64FAULT vmx_emul_tak(VCPU *vcpu, INST6
  16.153      if(vmx_vcpu_tak(vcpu, r3, &r1)){
  16.154          return IA64_FAULT;
  16.155      }
  16.156 -    vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  16.157 +    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  16.158      return(IA64_NO_FAULT);
  16.159  }
  16.160  
  16.161 @@ -531,7 +531,7 @@ IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INS
  16.162          return IA64_FAULT;
  16.163      }
  16.164  #endif // VMAL_NO_FAULT_CHECK
  16.165 -    if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&slot)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&pte)){
  16.166 +    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
  16.167  #ifdef  VMAL_NO_FAULT_CHECK
  16.168          set_isr_reg_nat_consumption(vcpu,0,0);
  16.169          rnat_comsumption(vcpu);
  16.170 @@ -588,7 +588,7 @@ IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INS
  16.171          return IA64_FAULT;
  16.172      }
  16.173  #endif // VMAL_NO_FAULT_CHECK
  16.174 -    if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&slot)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&pte)){
  16.175 +    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
  16.176  #ifdef  VMAL_NO_FAULT_CHECK
  16.177          set_isr_reg_nat_consumption(vcpu,0,0);
  16.178          rnat_comsumption(vcpu);
  16.179 @@ -648,7 +648,7 @@ IA64FAULT itc_fault_check(VCPU *vcpu, IN
  16.180          return IA64_FAULT;
  16.181      }
  16.182  #endif // VMAL_NO_FAULT_CHECK
  16.183 -    ret1 = vmx_vcpu_get_gr(vcpu,inst.M45.r2,pte);
  16.184 +    ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pte);
  16.185  #ifdef  VMAL_NO_FAULT_CHECK
  16.186      if( ret1 != IA64_NO_FAULT ){
  16.187          set_isr_reg_nat_consumption(vcpu,0,0);
  16.188 @@ -734,7 +734,7 @@ IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *v
  16.189      if(inst.M29.ar3!=44){
  16.190          panic("Can't support ar register other than itc");
  16.191      }
  16.192 -    if(vmx_vcpu_get_gr(vcpu,inst.M29.r2,&r2)){
  16.193 +    if(vcpu_get_gr_nat(vcpu,inst.M29.r2,&r2)){
  16.194  #ifdef  CHECK_FAULT
  16.195          set_isr_reg_nat_consumption(vcpu,0,0);
  16.196          rnat_comsumption(vcpu);
  16.197 @@ -778,7 +778,7 @@ IA64FAULT vmx_emul_mov_from_ar_reg(VCPU 
  16.198  #endif // CHECK_FAULT
  16.199      u64 r1;
  16.200      vmx_vcpu_get_itc(vcpu,&r1);
  16.201 -    vmx_vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
  16.202 +    vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
  16.203      return IA64_NO_FAULT;
  16.204  }
  16.205  
  16.206 @@ -800,7 +800,7 @@ IA64FAULT vmx_emul_mov_to_pkr(VCPU *vcpu
  16.207          return IA64_FAULT;
  16.208      }
  16.209  #endif // CHECK_FAULT
  16.210 -    if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
  16.211 +    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
  16.212  #ifdef  CHECK_FAULT
  16.213          set_isr_reg_nat_consumption(vcpu,0,0);
  16.214          rnat_comsumption(vcpu);
  16.215 @@ -823,7 +823,7 @@ IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu,
  16.216          return IA64_FAULT;
  16.217      }
  16.218  #endif // CHECK_FAULT
  16.219 -    if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
  16.220 +    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
  16.221  #ifdef  CHECK_FAULT
  16.222          set_isr_reg_nat_consumption(vcpu,0,0);
  16.223          rnat_comsumption(vcpu);
  16.224 @@ -846,7 +846,7 @@ IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu
  16.225          return IA64_FAULT;
  16.226      }
  16.227  #endif // CHECK_FAULT
  16.228 -    if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
  16.229 +    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
  16.230  #ifdef  CHECK_FAULT
  16.231          set_isr_reg_nat_consumption(vcpu,0,0);
  16.232          rnat_comsumption(vcpu);
  16.233 @@ -869,7 +869,7 @@ IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu
  16.234          return IA64_FAULT;
  16.235      }
  16.236  #endif // CHECK_FAULT
  16.237 -    if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
  16.238 +    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
  16.239  #ifdef  CHECK_FAULT
  16.240          set_isr_reg_nat_consumption(vcpu,0,0);
  16.241          rnat_comsumption(vcpu);
  16.242 @@ -892,7 +892,7 @@ IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu
  16.243          return IA64_FAULT;
  16.244      }
  16.245  #endif // CHECK_FAULT
  16.246 -    if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
  16.247 +    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
  16.248  #ifdef  CHECK_FAULT
  16.249          set_isr_reg_nat_consumption(vcpu,0,0);
  16.250          rnat_comsumption(vcpu);
  16.251 @@ -915,7 +915,7 @@ IA64FAULT vmx_emul_mov_to_pmd(VCPU *vcpu
  16.252          return IA64_FAULT;
  16.253      }
  16.254  #endif // CHECK_FAULT
  16.255 -    if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
  16.256 +    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
  16.257  #ifdef  CHECK_FAULT
  16.258          set_isr_reg_nat_consumption(vcpu,0,0);
  16.259          rnat_comsumption(vcpu);
  16.260 @@ -949,7 +949,7 @@ IA64FAULT vmx_emul_mov_from_rr(VCPU *vcp
  16.261      }
  16.262  
  16.263  #endif //CHECK_FAULT
  16.264 -     if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
  16.265 +     if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
  16.266  #ifdef  CHECK_FAULT
  16.267          set_isr_reg_nat_consumption(vcpu,0,0);
  16.268          rnat_comsumption(vcpu);
  16.269 @@ -963,7 +963,7 @@ IA64FAULT vmx_emul_mov_from_rr(VCPU *vcp
  16.270      }
  16.271  #endif  //CHECK_FAULT
  16.272      vmx_vcpu_get_rr(vcpu,r3,&r1);
  16.273 -    return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
  16.274 +    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
  16.275  }
  16.276  
  16.277  IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
  16.278 @@ -985,7 +985,7 @@ IA64FAULT vmx_emul_mov_from_pkr(VCPU *vc
  16.279      }
  16.280  
  16.281  #endif //CHECK_FAULT
  16.282 -     if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
  16.283 +     if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
  16.284  #ifdef  CHECK_FAULT
  16.285          set_isr_reg_nat_consumption(vcpu,0,0);
  16.286          rnat_comsumption(vcpu);
  16.287 @@ -1000,7 +1000,7 @@ IA64FAULT vmx_emul_mov_from_pkr(VCPU *vc
  16.288      }
  16.289  #endif  //CHECK_FAULT
  16.290      vmx_vcpu_get_pkr(vcpu,r3,&r1);
  16.291 -    return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
  16.292 +    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
  16.293  }
  16.294  
  16.295  IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
  16.296 @@ -1022,7 +1022,7 @@ IA64FAULT vmx_emul_mov_from_dbr(VCPU *vc
  16.297      }
  16.298  
  16.299  #endif //CHECK_FAULT
  16.300 -     if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
  16.301 +     if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
  16.302  #ifdef  CHECK_FAULT
  16.303          set_isr_reg_nat_consumption(vcpu,0,0);
  16.304          rnat_comsumption(vcpu);
  16.305 @@ -1037,7 +1037,7 @@ IA64FAULT vmx_emul_mov_from_dbr(VCPU *vc
  16.306      }
  16.307  #endif  //CHECK_FAULT
  16.308      vmx_vcpu_get_dbr(vcpu,r3,&r1);
  16.309 -    return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
  16.310 +    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
  16.311  }
  16.312  
  16.313  IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
  16.314 @@ -1059,7 +1059,7 @@ IA64FAULT vmx_emul_mov_from_ibr(VCPU *vc
  16.315      }
  16.316  
  16.317  #endif //CHECK_FAULT
  16.318 -     if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
  16.319 +     if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
  16.320  #ifdef  CHECK_FAULT
  16.321          set_isr_reg_nat_consumption(vcpu,0,0);
  16.322          rnat_comsumption(vcpu);
  16.323 @@ -1074,7 +1074,7 @@ IA64FAULT vmx_emul_mov_from_ibr(VCPU *vc
  16.324      }
  16.325  #endif  //CHECK_FAULT
  16.326      vmx_vcpu_get_ibr(vcpu,r3,&r1);
  16.327 -    return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
  16.328 +    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
  16.329  }
  16.330  
  16.331  IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
  16.332 @@ -1096,7 +1096,7 @@ IA64FAULT vmx_emul_mov_from_pmc(VCPU *vc
  16.333      }
  16.334  
  16.335  #endif //CHECK_FAULT
  16.336 -     if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
  16.337 +     if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
  16.338  #ifdef  CHECK_FAULT
  16.339          set_isr_reg_nat_consumption(vcpu,0,0);
  16.340          rnat_comsumption(vcpu);
  16.341 @@ -1111,7 +1111,7 @@ IA64FAULT vmx_emul_mov_from_pmc(VCPU *vc
  16.342      }
  16.343  #endif  //CHECK_FAULT
  16.344      vmx_vcpu_get_pmc(vcpu,r3,&r1);
  16.345 -    return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
  16.346 +    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
  16.347  }
  16.348  
  16.349  IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
  16.350 @@ -1124,7 +1124,7 @@ IA64FAULT vmx_emul_mov_from_cpuid(VCPU *
  16.351          return IA64_FAULT;
  16.352      }
  16.353  #endif //CHECK_FAULT
  16.354 -     if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
  16.355 +     if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
  16.356  #ifdef  CHECK_FAULT
  16.357          set_isr_reg_nat_consumption(vcpu,0,0);
  16.358          rnat_comsumption(vcpu);
  16.359 @@ -1139,7 +1139,7 @@ IA64FAULT vmx_emul_mov_from_cpuid(VCPU *
  16.360      }
  16.361  #endif  //CHECK_FAULT
  16.362      vmx_vcpu_get_cpuid(vcpu,r3,&r1);
  16.363 -    return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
  16.364 +    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
  16.365  }
  16.366  
  16.367  IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
  16.368 @@ -1160,7 +1160,7 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu,
  16.369          return IA64_FAULT;
  16.370      }
  16.371  #endif // CHECK_FAULT
  16.372 -    if(vmx_vcpu_get_gr(vcpu, inst.M32.r2, &r2)){
  16.373 +    if(vcpu_get_gr_nat(vcpu, inst.M32.r2, &r2)){
  16.374  #ifdef  CHECK_FAULT
  16.375          set_isr_reg_nat_consumption(vcpu,0,0);
  16.376          rnat_comsumption(vcpu);
  16.377 @@ -1214,11 +1214,11 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu,
  16.378  
  16.379  #define cr_get(cr) \
  16.380      ((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
  16.381 -        vmx_vcpu_set_gr(vcpu, tgt, val,0):fault;
  16.382 +        vcpu_set_gr(vcpu, tgt, val,0):fault;
  16.383  
  16.384  #define vmx_cr_get(cr) \
  16.385      ((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
  16.386 -        vmx_vcpu_set_gr(vcpu, tgt, val,0):fault;
  16.387 +        vcpu_set_gr(vcpu, tgt, val,0):fault;
  16.388  
  16.389  IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
  16.390  {
  16.391 @@ -1260,9 +1260,9 @@ IA64FAULT vmx_emul_mov_from_cr(VCPU *vcp
  16.392          case 64:return vmx_cr_get(lid);
  16.393          case 65:
  16.394                  vmx_vcpu_get_ivr(vcpu,&val);
  16.395 -                return vmx_vcpu_set_gr(vcpu,tgt,val,0);
  16.396 +                return vcpu_set_gr(vcpu,tgt,val,0);
  16.397          case 66:return vmx_cr_get(tpr);
  16.398 -        case 67:return vmx_vcpu_set_gr(vcpu,tgt,0L,0);
  16.399 +        case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
  16.400          case 68:return vmx_cr_get(irr0);
  16.401          case 69:return vmx_cr_get(irr1);
  16.402          case 70:return vmx_cr_get(irr2);
  16.403 @@ -1306,18 +1306,19 @@ IA64_BUNDLE __vmx_get_domain_bundle(u64 
  16.404   */
  16.405  
  16.406  void
  16.407 -vmx_emulate(VCPU *vcpu, UINT64 cause, UINT64 opcode)
  16.408 +vmx_emulate(VCPU *vcpu, REGS *regs)
  16.409  {
  16.410      IA64_BUNDLE bundle;
  16.411      int slot;
  16.412      IA64_SLOT_TYPE slot_type;
  16.413      IA64FAULT status;
  16.414      INST64 inst;
  16.415 -    REGS * regs;
  16.416 -    UINT64 iip;
  16.417 -    regs = vcpu_regs(vcpu);
  16.418 +    UINT64 iip, cause, opcode;
  16.419      iip = regs->cr_iip;
  16.420      IA64_PSR vpsr;
  16.421 +    cause = VMX(vcpu,cause);
  16.422 +    opcode = VMX(vcpu,opcode);
  16.423 +
  16.424  /*
  16.425      if (privop_trace) {
  16.426          static long i = 400;
  16.427 @@ -1356,7 +1357,6 @@ if ( (cause == 0xff && opcode == 0x1e000
  16.428  #else
  16.429      inst.inst=opcode;
  16.430  #endif /* BYPASS_VMAL_OPCODE */
  16.431 -    vcpu_set_regs(vcpu, regs);
  16.432      /*
  16.433       * Switch to actual virtual rid in rr0 and rr4,
  16.434       * which is required by some tlb related instructions.
    17.1 --- a/xen/arch/ia64/vmx/vtlb.c	Sun Sep 18 12:18:57 2005 -0600
    17.2 +++ b/xen/arch/ia64/vmx/vtlb.c	Mon Sep 19 11:08:20 2005 -0600
    17.3 @@ -421,10 +421,11 @@ static void vhpt_insert(thash_cb_t *hcb,
    17.4              *cch = *hash_table;
    17.5              *hash_table = vhpt_entry;
    17.6              hash_table->next = cch;
    17.7 +            if(hash_table->tag==hash_table->next->tag)
    17.8 +                while(1);	/* debug trap: duplicate tag in collision chain */
    17.9 +
   17.10          }
   17.11  
   17.12 -        if(hash_table->tag==hash_table->next->tag)
   17.13 -            while(1);
   17.14      }
   17.15      return /*hash_table*/;
   17.16  }
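
The vtlb.c hunk is a small bug fix as much as a cleanup: the
duplicate-tag debug trap used to run even on the branch where the head
slot is filled with no collision chain behind it, where hash_table->next
is not a freshly linked entry and may not be dereferenceable at all.
A sketch of the insertion being guarded (field names as in vtlb.c; the
function name is invented):

    static void chain_head_insert(thash_data_t *head, thash_data_t *spare,
                                  thash_data_t *entry)
    {
        *spare = *head;      /* displaced head keeps the chain intact */
        *head = *entry;      /* new entry takes the head slot */
        head->next = spare;
        if (head->tag == head->next->tag)
            while (1);       /* debug trap: same entry inserted twice */
    }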
    18.1 --- a/xen/arch/ia64/xen/domain.c	Sun Sep 18 12:18:57 2005 -0600
    18.2 +++ b/xen/arch/ia64/xen/domain.c	Mon Sep 19 11:08:20 2005 -0600
    18.3 @@ -364,7 +364,8 @@ void new_thread(struct vcpu *v,
    18.4  #ifdef CONFIG_VTI
    18.5  		vmx_init_all_rr(v);
    18.6  		if (d == dom0)
    18.7 -		    VCPU(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
    18.8 +//		    VCPU(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
    18.9 +		    regs->r28 = dom_fw_setup(d,saved_command_line,256L);
   18.10  		/* Virtual processor context setup */
   18.11  		VCPU(v, vpsr) = IA64_PSR_BN;
   18.12  		VCPU(v, dcr) = 0;
    19.1 --- a/xen/arch/ia64/xen/ivt.S	Sun Sep 18 12:18:57 2005 -0600
    19.2 +++ b/xen/arch/ia64/xen/ivt.S	Mon Sep 19 11:08:20 2005 -0600
    19.3 @@ -1012,7 +1012,8 @@ dispatch_break_fault_post_save:
    19.4  	movl r14=ia64_leave_kernel
    19.5  	;;
    19.6  	mov rp=r14
    19.7 -	br.sptk.many ia64_prepare_handle_break
    19.8 +//	br.sptk.many ia64_prepare_handle_break
   19.9 +	br.call.sptk.many b6=ia64_handle_break
   19.10  END(dispatch_break_fault)
   19.11  #endif
   19.12  
   19.13 @@ -1239,7 +1240,8 @@ ENTRY(dispatch_privop_fault)
   19.14  	movl r14=ia64_leave_kernel
   19.15  	;;
   19.16  	mov rp=r14
   19.17 -	br.sptk.many ia64_prepare_handle_privop
   19.18 +//	br.sptk.many ia64_prepare_handle_privop
  19.19 +	br.call.sptk.many b6=ia64_handle_privop
   19.20  END(dispatch_privop_fault)
   19.21  #endif
   19.22  
   19.23 @@ -1307,7 +1309,8 @@ ENTRY(dispatch_unaligned_handler)
   19.24  	movl r14=ia64_leave_kernel
   19.25  	;;
   19.26  	mov rp=r14
   19.27 -	br.sptk.many ia64_prepare_handle_unaligned
   19.28 +//	br.sptk.many ia64_prepare_handle_unaligned
  19.29 +	br.call.sptk.many b6=ia64_handle_unaligned
   19.30  END(dispatch_unaligned_handler)
   19.31  
   19.32  	.org ia64_ivt+0x4c00
   19.33 @@ -1874,7 +1877,8 @@ GLOBAL_ENTRY(dispatch_reflection)
   19.34  	movl r14=ia64_leave_kernel
   19.35  	;;
   19.36  	mov rp=r14
   19.37 -	br.sptk.many ia64_prepare_handle_reflection
   19.38 +//	br.sptk.many ia64_prepare_handle_reflection
  19.39 +	br.call.sptk.many b6=ia64_handle_reflection
   19.40  END(dispatch_reflection)
   19.41  
   19.42  #define SAVE_MIN_COVER_DONE	DO_SAVE_MIN(,mov r30=cr.ifs,)
    20.1 --- a/xen/arch/ia64/xen/privop.c	Sun Sep 18 12:18:57 2005 -0600
    20.2 +++ b/xen/arch/ia64/xen/privop.c	Mon Sep 19 11:08:20 2005 -0600
    20.3 @@ -143,7 +143,7 @@ IA64FAULT priv_tpa(VCPU *vcpu, INST64 in
    20.4  		fault = vcpu_ttag(vcpu,vcpu_get_gr(vcpu,src-64),&padr);
    20.5  	else fault = vcpu_tpa(vcpu,vcpu_get_gr(vcpu,src),&padr);
    20.6  	if (fault == IA64_NO_FAULT)
    20.7 -		return vcpu_set_gr(vcpu, inst.M46.r1, padr);
    20.8 +		return vcpu_set_gr(vcpu, inst.M46.r1, padr, 0);
    20.9  	else return fault;
   20.10  }
   20.11  
   20.12 @@ -158,7 +158,7 @@ IA64FAULT priv_tak(VCPU *vcpu, INST64 in
   20.13  		fault = vcpu_thash(vcpu,vcpu_get_gr(vcpu,src-64),&key);
   20.14  	else fault = vcpu_tak(vcpu,vcpu_get_gr(vcpu,src),&key);
   20.15  	if (fault == IA64_NO_FAULT)
   20.16 -		return vcpu_set_gr(vcpu, inst.M46.r1, key);
   20.17 +		return vcpu_set_gr(vcpu, inst.M46.r1, key,0);
   20.18  	else return fault;
   20.19  }
   20.20  
   20.21 @@ -244,7 +244,7 @@ IA64FAULT priv_mov_to_ar_reg(VCPU *vcpu,
   20.22  	if (inst.M29.r2 > 63 && inst.M29.ar3 < 8) { // privified mov from kr
   20.23  		UINT64 val;
   20.24  		if (vcpu_get_ar(vcpu,ar3,&val) != IA64_ILLOP_FAULT)
   20.25 -			return vcpu_set_gr(vcpu, inst.M29.r2-64, val);
   20.26 +			return vcpu_set_gr(vcpu, inst.M29.r2-64, val,0);
   20.27  		else return IA64_ILLOP_FAULT;
   20.28  	}
   20.29  	else {
   20.30 @@ -369,12 +369,12 @@ IA64FAULT priv_mov_from_rr(VCPU *vcpu, I
   20.31  	if (inst.M43.r1 > 63) { // privified mov from cpuid
   20.32  		fault = vcpu_get_cpuid(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   20.33  		if (fault == IA64_NO_FAULT)
   20.34 -			return vcpu_set_gr(vcpu, inst.M43.r1-64, val);
   20.35 +			return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
   20.36  	}
   20.37  	else {
   20.38  		fault = vcpu_get_rr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   20.39  		if (fault == IA64_NO_FAULT)
   20.40 -			return vcpu_set_gr(vcpu, inst.M43.r1, val);
   20.41 +			return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
   20.42  	}
   20.43  	return fault;
   20.44  }
   20.45 @@ -386,7 +386,7 @@ IA64FAULT priv_mov_from_pkr(VCPU *vcpu, 
   20.46  	
   20.47  	fault = vcpu_get_pkr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   20.48  	if (fault == IA64_NO_FAULT)
   20.49 -		return vcpu_set_gr(vcpu, inst.M43.r1, val);
   20.50 +		return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
   20.51  	else return fault;
   20.52  }
   20.53  
   20.54 @@ -397,7 +397,7 @@ IA64FAULT priv_mov_from_dbr(VCPU *vcpu, 
   20.55  	
   20.56  	fault = vcpu_get_dbr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   20.57  	if (fault == IA64_NO_FAULT)
   20.58 -		return vcpu_set_gr(vcpu, inst.M43.r1, val);
   20.59 +		return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
   20.60  	else return fault;
   20.61  }
   20.62  
   20.63 @@ -408,7 +408,7 @@ IA64FAULT priv_mov_from_ibr(VCPU *vcpu, 
   20.64  	
   20.65  	fault = vcpu_get_ibr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   20.66  	if (fault == IA64_NO_FAULT)
   20.67 -		return vcpu_set_gr(vcpu, inst.M43.r1, val);
   20.68 +		return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
   20.69  	else return fault;
   20.70  }
   20.71  
   20.72 @@ -420,12 +420,12 @@ IA64FAULT priv_mov_from_pmc(VCPU *vcpu, 
   20.73  	if (inst.M43.r1 > 63) { // privified mov from pmd
   20.74  		fault = vcpu_get_pmd(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   20.75  		if (fault == IA64_NO_FAULT)
   20.76 -			return vcpu_set_gr(vcpu, inst.M43.r1-64, val);
   20.77 +			return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
   20.78  	}
   20.79  	else {
   20.80  		fault = vcpu_get_pmc(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   20.81  		if (fault == IA64_NO_FAULT)
   20.82 -			return vcpu_set_gr(vcpu, inst.M43.r1, val);
   20.83 +			return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
   20.84  	}
   20.85  	return fault;
   20.86  }
   20.87 @@ -434,7 +434,7 @@ unsigned long from_cr_cnt[128] = { 0 };
   20.88  
   20.89  #define cr_get(cr) \
   20.90  	((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
   20.91 -		vcpu_set_gr(vcpu, tgt, val) : fault;
   20.92 +		vcpu_set_gr(vcpu, tgt, val, 0) : fault;
   20.93  	
   20.94  IA64FAULT priv_mov_from_cr(VCPU *vcpu, INST64 inst)
   20.95  {
   20.96 @@ -460,7 +460,7 @@ IA64FAULT priv_mov_from_cr(VCPU *vcpu, I
   20.97  	    case 64:return cr_get(lid);
   20.98  	    case 65:return cr_get(ivr);
   20.99  	    case 66:return cr_get(tpr);
  20.100 -	    case 67:return vcpu_set_gr(vcpu,tgt,0L);
  20.101 +	    case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
  20.102  	    case 68:return cr_get(irr0);
  20.103  	    case 69:return cr_get(irr1);
  20.104  	    case 70:return cr_get(irr2);
  20.105 @@ -482,7 +482,7 @@ IA64FAULT priv_mov_from_psr(VCPU *vcpu, 
  20.106  	IA64FAULT fault;
  20.107  
  20.108  	if ((fault = vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
  20.109 -		return vcpu_set_gr(vcpu, tgt, val);
  20.110 +		return vcpu_set_gr(vcpu, tgt, val, 0);
  20.111  	else return fault;
  20.112  }
  20.113  
    21.1 --- a/xen/arch/ia64/xen/process.c	Sun Sep 18 12:18:57 2005 -0600
    21.2 +++ b/xen/arch/ia64/xen/process.c	Mon Sep 19 11:08:20 2005 -0600
    21.3 @@ -548,7 +548,7 @@ do_ssc(unsigned long ssc, struct pt_regs
    21.4  		break;
    21.5  	    case SSC_GETCHAR:
    21.6  		retval = ia64_ssc(0,0,0,0,ssc);
    21.7 -		vcpu_set_gr(current,8,retval);
    21.8 +		vcpu_set_gr(current,8,retval,0);
    21.9  		break;
   21.10  	    case SSC_WAIT_COMPLETION:
   21.11  		if (arg0) {	// metaphysical address
   21.12 @@ -562,7 +562,7 @@ do_ssc(unsigned long ssc, struct pt_regs
   21.13  /**/			retval = 0;
   21.14  		}
   21.15  		else retval = -1L;
   21.16 -		vcpu_set_gr(current,8,retval);
   21.17 +		vcpu_set_gr(current,8,retval,0);
   21.18  		break;
   21.19  	    case SSC_OPEN:
   21.20  		arg1 = vcpu_get_gr(current,33);	// access rights
   21.21 @@ -572,7 +572,7 @@ if (!running_on_sim) { printf("SSC_OPEN,
   21.22  			retval = ia64_ssc(arg0,arg1,0,0,ssc);
   21.23  		}
   21.24  		else retval = -1L;
   21.25 -		vcpu_set_gr(current,8,retval);
   21.26 +		vcpu_set_gr(current,8,retval,0);
   21.27  		break;
   21.28  	    case SSC_WRITE:
   21.29  	    case SSC_READ:
   21.30 @@ -620,7 +620,7 @@ if (!running_on_sim) { printf("SSC_OPEN,
   21.31  			req->len = last_count;
   21.32  		}
   21.33  		else retval = -1L;
   21.34 -		vcpu_set_gr(current,8,retval);
   21.35 +		vcpu_set_gr(current,8,retval,0);
   21.36  //if (last_count >= PAGE_SIZE) printf("retval=%x\n",retval);
   21.37  		break;
   21.38  	    case SSC_CONNECT_INTERRUPT:
   21.39 @@ -631,7 +631,7 @@ if (!running_on_sim) { printf("SSC_OPEN,
   21.40  		(void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
   21.41  		break;
   21.42  	    case SSC_NETDEV_PROBE:
   21.43 -		vcpu_set_gr(current,8,-1L);
   21.44 +		vcpu_set_gr(current,8,-1L,0);
   21.45  		break;
   21.46  	    default:
   21.47  		printf("ia64_handle_break: bad ssc code %lx, iip=%p, b0=%p... spinning\n",ssc,regs->cr_iip,regs->b0);
   21.48 @@ -763,18 +763,18 @@ unsigned long __hypercall_create_continu
   21.49      if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
   21.50  	panic("PREEMPT happen in multicall\n");	// Not support yet
   21.51      } else {
   21.52 -	vcpu_set_gr(vcpu, 2, op);
   21.53 +	vcpu_set_gr(vcpu, 2, op, 0);
   21.54  	for ( i = 0; i < nr_args; i++) {
   21.55  	    switch (i) {
   21.56 -	    case 0: vcpu_set_gr(vcpu, 14, va_arg(args, unsigned long));
   21.57 -		    break;
   21.58 -	    case 1: vcpu_set_gr(vcpu, 15, va_arg(args, unsigned long));
   21.59 +	    case 0: vcpu_set_gr(vcpu, 14, va_arg(args, unsigned long), 0);
   21.60  		    break;
   21.61 -	    case 2: vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long));
   21.62 +	    case 1: vcpu_set_gr(vcpu, 15, va_arg(args, unsigned long), 0);
   21.63  		    break;
   21.64 -	    case 3: vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long));
   21.65 +	    case 2: vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long), 0);
   21.66  		    break;
   21.67 -	    case 4: vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long));
   21.68 +	    case 3: vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long), 0);
   21.69 +		    break;
   21.70 +	    case 4: vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long), 0);
   21.71  		    break;
   21.72  	    default: panic("Too many args for hypercall continuation\n");
   21.73  		    break;
    22.1 --- a/xen/arch/ia64/xen/vcpu.c	Sun Sep 18 12:18:57 2005 -0600
    22.2 +++ b/xen/arch/ia64/xen/vcpu.c	Mon Sep 19 11:08:20 2005 -0600
    22.3 @@ -31,7 +31,8 @@ typedef	union {
    22.4  //typedef struct domain VCPU;
    22.5  
    22.6  // this def for vcpu_regs won't work if kernel stack is present
    22.7 -#define	vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
    22.8 +//#define	vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
    22.9 +#define vcpu_regs(vcpu) (((struct pt_regs *) ((char *) (vcpu) + IA64_STK_OFFSET)) - 1)
   22.10  #define	PSCB(x,y)	VCPU(x,y)
   22.11  #define	PSCBX(x,y)	x->arch.y
   22.12  
   22.13 @@ -70,22 +71,45 @@ extern TR_ENTRY *match_dtlb(VCPU *vcpu, 
   22.14  /**************************************************************************
   22.15   VCPU general register access routines
   22.16  **************************************************************************/
   22.17 -
   22.18 +#ifdef XEN
   22.19  UINT64
   22.20  vcpu_get_gr(VCPU *vcpu, unsigned reg)
   22.21  {
   22.22  	REGS *regs = vcpu_regs(vcpu);
   22.23  	UINT64 val;
   22.24 -
   22.25  	if (!reg) return 0;
   22.26  	getreg(reg,&val,0,regs);	// FIXME: handle NATs later
   22.27  	return val;
   22.28  }
   22.29 +IA64FAULT
   22.30 +vcpu_get_gr_nat(VCPU *vcpu, unsigned reg, UINT64 *val)
   22.31 +{
   22.32 +	REGS *regs = vcpu_regs(vcpu);
   22.33 +	int nat;
   22.34 +	getreg(reg,val,&nat,regs);	// NaT bit is reported to the caller
   22.35 +	if (nat)
   22.36 +		return IA64_NAT_CONSUMPTION_VECTOR;
   22.37 +	return IA64_NO_FAULT;
   22.38 +}
   22.39  
   22.40  // returns:
   22.41  //   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
   22.42  //   IA64_NO_FAULT otherwise
   22.43  IA64FAULT
   22.44 +vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value, int nat)
   22.45 +{
   22.46 +	REGS *regs = vcpu_regs(vcpu);
   22.47 +	if (!reg) return IA64_ILLOP_FAULT;
   22.48 +	long sof = (regs->cr_ifs) & 0x7f;
   22.49 +	if (reg >= sof + 32) return IA64_ILLOP_FAULT;
   22.50 +	setreg(reg,value,nat,regs);	// NaT bit is supplied by the caller
   22.51 +	return IA64_NO_FAULT;
   22.52 +}
   22.53 +#else
   22.54 +// returns:
   22.55 +//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
   22.56 +//   IA64_NO_FAULT otherwise
   22.57 +IA64FAULT
   22.58  vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value)
   22.59  {
   22.60  	REGS *regs = vcpu_regs(vcpu);
   22.61 @@ -97,6 +121,7 @@ vcpu_set_gr(VCPU *vcpu, unsigned reg, UI
   22.62  	return IA64_NO_FAULT;
   22.63  }
   22.64  
   22.65 +#endif
   22.66  /**************************************************************************
   22.67   VCPU privileged application register access routines
   22.68  **************************************************************************/
   22.69 @@ -586,11 +611,9 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
   22.70  		printf("vcpu_pend_interrupt: bad vector\n");
   22.71  		return;
   22.72  	}
   22.73 -//#ifdef CONFIG_VTI
   22.74      if ( VMX_DOMAIN(vcpu) ) {
   22.75   	    set_bit(vector,VCPU(vcpu,irr));
   22.76      } else
   22.77 -//#endif // CONFIG_VTI
   22.78      {
   22.79  	/* if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; */
   22.80  	if (test_bit(vector,PSCBX(vcpu,irr))) {
   22.81 @@ -1360,7 +1383,7 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
   22.82  
   22.83  			vcpu_thash(vcpu, address, &iha);
   22.84  			if (__copy_from_user(&pte, (void *)iha, sizeof(pte)) != 0)
   22.85 -				return IA64_VHPT_TRANS_VECTOR;
   22.86 +				return IA64_VHPT_FAULT;
   22.87  
   22.88  			/* 
   22.89  			 * Optimisation: this VHPT walker aborts on not-present pages
   22.90 @@ -1496,6 +1519,18 @@ printf("%lx=vcpu_get_pmd(%x)\n",val,reg)
   22.91  /**************************************************************************
   22.92   VCPU banked general register access routines
   22.93  **************************************************************************/
   22.94 +#define vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT)     \
   22.95 +do{     \
   22.96 +    __asm__ __volatile__ (                      \
   22.97 +        ";;extr.u %0 = %3,%6,16;;\n"            \
   22.98 +        "dep %1 = %0, %1, 0, 16;;\n"            \
   22.99 +        "st8 [%4] = %1\n"                       \
  22.100 +        "extr.u %0 = %2, 16, 16;;\n"            \
  22.101 +        "dep %3 = %0, %3, %6, 16;;\n"           \
  22.102 +        "st8 [%5] = %3\n"                       \
  22.103 +        ::"r"(i),"r"(*b1unat),"r"(*b0unat),"r"(*runat),"r"(b1unat), \
  22.104 +        "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory");    \
  22.105 +}while(0)
  22.106  
  22.107  IA64FAULT vcpu_bsw0(VCPU *vcpu)
  22.108  {
  22.109 @@ -1504,15 +1539,41 @@ IA64FAULT vcpu_bsw0(VCPU *vcpu)
  22.110  	unsigned long *r = &regs->r16;
  22.111  	unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
  22.112  	unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
  22.113 -	int i;
  22.114 +	unsigned long *runat = &regs->eml_unat;
  22.115 +	unsigned long *b0unat = &PSCB(vcpu,vbnat);
  22.116 +	unsigned long *b1unat = &PSCB(vcpu,vnat);
  22.117  
  22.118 -	if (PSCB(vcpu,banknum)) {
  22.119 -		for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
  22.120 -		PSCB(vcpu,banknum) = 0;
  22.121 -	}
  22.122 +	unsigned long i;
  22.123 +
  22.124 +    if(VMX_DOMAIN(vcpu)){
  22.125 +        if(VCPU(vcpu,vpsr)&IA64_PSR_BN){
  22.126 +            for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
  22.127 +            vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
  22.128 +            VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
  22.129 +        }
  22.130 +    }else{
  22.131 +        if (PSCB(vcpu,banknum)) {
  22.132 +            for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
  22.133 +            vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
  22.134 +            PSCB(vcpu,banknum) = 0;
  22.135 +        }
  22.136 +    }
  22.137  	return (IA64_NO_FAULT);
  22.138  }
  22.139  
  22.140 +#define vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT)     \
  22.141 +do{             \
  22.142 +    __asm__ __volatile__ (      \
  22.143 +        ";;extr.u %0 = %3,%6,16;;\n"                \
  22.144 +        "dep %1 = %0, %1, 16, 16;;\n"               \
  22.145 +        "st8 [%4] = %1\n"                           \
  22.146 +        "extr.u %0 = %2, 0, 16;;\n"                 \
  22.147 +        "dep %3 = %0, %3, %6, 16;;\n"               \
  22.148 +        "st8 [%5] = %3\n"                           \
  22.149 +        ::"r"(i),"r"(*b0unat),"r"(*b1unat),"r"(*runat),"r"(b0unat), \
  22.150 +        "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory");            \
  22.151 +}while(0)
  22.152 +
  22.153  IA64FAULT vcpu_bsw1(VCPU *vcpu)
  22.154  {
  22.155  	// TODO: Only allowed for current vcpu
  22.156 @@ -1520,12 +1581,25 @@ IA64FAULT vcpu_bsw1(VCPU *vcpu)
  22.157  	unsigned long *r = &regs->r16;
  22.158  	unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
  22.159  	unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
  22.160 -	int i;
  22.161 +	unsigned long *runat = &regs->eml_unat;
  22.162 +	unsigned long *b0unat = &PSCB(vcpu,vbnat);
  22.163 +	unsigned long *b1unat = &PSCB(vcpu,vnat);
  22.164  
  22.165 -	if (!PSCB(vcpu,banknum)) {
  22.166 -		for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
  22.167 -		PSCB(vcpu,banknum) = 1;
  22.168 -	}
  22.169 +	unsigned long i;
  22.170 +
  22.171 +    if(VMX_DOMAIN(vcpu)){
  22.172 +        if(!(VCPU(vcpu,vpsr)&IA64_PSR_BN)){
  22.173 +            for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
  22.174 +            vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
  22.175 +            VCPU(vcpu,vpsr) |= IA64_PSR_BN;
  22.176 +        }
  22.177 +    }else{
  22.178 +        if (!PSCB(vcpu,banknum)) {
  22.179 +            for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
  22.180 +            vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
  22.181 +            PSCB(vcpu,banknum) = 1;
  22.182 +        }
  22.183 +    }
  22.184  	return (IA64_NO_FAULT);
  22.185  }
  22.186  
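
The merged bsw0/bsw1 keep one copy loop for both domain types and add
the vcpu_bsw0_unat/vcpu_bsw1_unat macros to shuffle the 16 NaT bits that
belong to r16-r31.  A rough C equivalent of vcpu_bsw1_unat (a sketch;
the helper names are invented, and the 0/16 bit offsets simply mirror
the extr.u/dep operands in the macros above):

    static inline unsigned long fld_get(unsigned long w, int pos)
    {
        return (w >> pos) & 0xffffUL;
    }
    static inline unsigned long fld_put(unsigned long w, int pos,
                                        unsigned long v)
    {
        return (w & ~(0xffffUL << pos)) | (v << pos);
    }

    /* bsw1: park the outgoing bank-0 NaTs, pull in the parked
     * bank-1 NaTs at r16's unat slot of regs->eml_unat */
    static void bsw1_unat_model(unsigned long *runat, unsigned long *b0unat,
                                unsigned long *b1unat, int r16_slot)
    {
        *b0unat = fld_put(*b0unat, 16, fld_get(*runat, r16_slot));
        *runat = fld_put(*runat, r16_slot, fld_get(*b1unat, 0));
    }

vcpu_bsw0_unat is the mirror image.  Note that the i argument to both
macros is only an assembler scratch register and carries no value in
(strictly it ought to be an output operand).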
    23.1 --- a/xen/include/asm-ia64/ia64_int.h	Sun Sep 18 12:18:57 2005 -0600
    23.2 +++ b/xen/include/asm-ia64/ia64_int.h	Mon Sep 19 11:08:20 2005 -0600
    23.3 @@ -33,14 +33,10 @@
    23.4  #define IA64_TAKEN_BRANCH_TRAP_VECTOR		0x5f00
    23.5  #define IA64_SINGLE_STEP_TRAP_VECTOR		0x6000
    23.6  
    23.7 -#define	IA64_NO_FAULT		0x0001
    23.8 +#define	IA64_NO_FAULT		0x0000
    23.9 +#define	IA64_FAULT		0x0001
   23.10  #define	IA64_RFI_IN_PROGRESS	0x0002
   23.11  #define IA64_RETRY              0x0003
   23.12 -#ifdef  CONFIG_VTI
   23.13 -#undef  IA64_NO_FAULT
   23.14 -#define	IA64_NO_FAULT		0x0000
   23.15 -#define IA64_FAULT		0x0001
   23.16 -#endif      //CONFIG_VTI
   23.17  #define IA64_FORCED_IFA         0x0004
   23.18  #define	IA64_ILLOP_FAULT	(IA64_GENEX_VECTOR | 0x00)
   23.19  #define	IA64_PRIVOP_FAULT	(IA64_GENEX_VECTOR | 0x10)
   23.20 @@ -49,7 +45,7 @@
   23.21  #define	IA64_DISIST_FAULT	(IA64_GENEX_VECTOR | 0x40)
   23.22  #define	IA64_ILLDEP_FAULT	(IA64_GENEX_VECTOR | 0x80)
   23.23  #define	IA64_DTLB_FAULT		(IA64_DATA_TLB_VECTOR)
   23.24 -
   23.25 +#define IA64_VHPT_FAULT     (IA64_VHPT_TRANS_VECTOR | 0x10)
   23.26  #if !defined(__ASSEMBLY__)
   23.27  typedef unsigned long IA64FAULT;
   23.28  typedef unsigned long IA64INTVECTOR;
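
Collapsing the CONFIG_VTI block makes IA64_NO_FAULT zero and IA64_FAULT
non-zero for both domain types, which is what lets the merged accessors
be tested as truth values all over vmx_virt.c and privop.c.  The
resulting calling convention (a sketch; handle_nat() is a hypothetical
stand-in for whatever NaT-consumption path the caller takes):

    UINT64 r3;
    if (vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3))  /* non-zero: source is NaT */
        return handle_nat(vcpu);
    vcpu_set_gr(vcpu, inst.M46.r1, r3, 0);        /* 4th arg: target NaT bit */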
    24.1 --- a/xen/include/asm-ia64/privop.h	Sun Sep 18 12:18:57 2005 -0600
    24.2 +++ b/xen/include/asm-ia64/privop.h	Mon Sep 19 11:08:20 2005 -0600
    24.3 @@ -2,11 +2,11 @@
    24.4  #define _XEN_IA64_PRIVOP_H
    24.5  
    24.6  #include <asm/ia64_int.h>
    24.7 -#ifdef CONFIG_VTI
    24.8 +//#ifdef CONFIG_VTI
    24.9  #include <asm/vmx_vcpu.h>
   24.10 -#else //CONFIG_VTI
   24.11 +//#else //CONFIG_VTI
   24.12  #include <asm/vcpu.h>
   24.13 -#endif //CONFIG_VTI
   24.14 +//#endif //CONFIG_VTI
   24.15  
   24.16  typedef unsigned long IA64_INST;
   24.17  
   24.18 @@ -95,7 +95,7 @@ typedef union U_INST64_M33 {
   24.19  typedef union U_INST64_M35 {
   24.20      IA64_INST inst;
   24.21      struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
   24.22 -    	
   24.23 +   
   24.24  } INST64_M35;
   24.25  
   24.26  typedef union U_INST64_M36 {
    25.1 --- a/xen/include/asm-ia64/vcpu.h	Sun Sep 18 12:18:57 2005 -0600
    25.2 +++ b/xen/include/asm-ia64/vcpu.h	Mon Sep 19 11:08:20 2005 -0600
    25.3 @@ -35,7 +35,8 @@ struct privop_addr_count {
    25.4  
    25.5  /* general registers */
    25.6  extern UINT64 vcpu_get_gr(VCPU *vcpu, unsigned reg);
    25.7 -extern IA64FAULT vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value);
    25.8 +extern IA64FAULT vcpu_get_gr_nat(VCPU *vcpu, unsigned reg, UINT64 *val);
    25.9 +extern IA64FAULT vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value, int nat);
   25.10  /* application registers */
   25.11  extern IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val);
   25.12  /* psr */
    26.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Sun Sep 18 12:18:57 2005 -0600
    26.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Mon Sep 19 11:08:20 2005 -0600
    26.3 @@ -93,8 +93,10 @@ extern IA64FAULT vmx_vcpu_rfi(VCPU *vcpu
    26.4  extern UINT64 vmx_vcpu_get_psr(VCPU *vcpu);
    26.5  extern IA64FAULT vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val);
    26.6  extern IA64FAULT vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val,int nat);
    26.7 +#if 0
    26.8  extern IA64FAULT vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val);
    26.9  extern IA64FAULT vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat);
   26.10 +#endif
   26.11  extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24);
   26.12  extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24);
   26.13  extern IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
   26.14 @@ -453,6 +455,7 @@ IA64FAULT vmx_vcpu_get_pmd(VCPU *vcpu, U
   26.15  /**************************************************************************
   26.16   VCPU banked general register access routines
   26.17  **************************************************************************/
   26.18 +#if 0
   26.19  static inline
   26.20  IA64FAULT vmx_vcpu_bsw0(VCPU *vcpu)
   26.21  {
   26.22 @@ -467,6 +470,7 @@ IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
   26.23      VCPU(vcpu,vpsr) |= IA64_PSR_BN;
   26.24      return (IA64_NO_FAULT);
   26.25  }
   26.26 +#endif
   26.27  #if 0
   26.28  /* Another hash performance algorithm */
   26.29  #define redistribute_rid(rid)	(((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
    27.1 --- a/xen/include/asm-ia64/vmx_vpd.h	Sun Sep 18 12:18:57 2005 -0600
    27.2 +++ b/xen/include/asm-ia64/vmx_vpd.h	Mon Sep 19 11:08:20 2005 -0600
    27.3 @@ -68,6 +68,11 @@ struct arch_vmx_struct {
    27.4  	vtime_t	    vtm;
    27.5      unsigned long   vrr[8];
    27.6      unsigned long   vkr[8];
    27.7 +    unsigned long   cr_iipa;   /* for emulation */
    27.8 +    unsigned long   cr_isr;    /* for emulation */
    27.9 +    unsigned long   cause;
   27.10 +    unsigned long   opcode;
   27.11 +
   27.12  //    unsigned long   mrr5;
   27.13  //    unsigned long   mrr6;
   27.14  //    unsigned long   mrr7;
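
cr_iipa and cr_isr move out of the public pt_regs (see the arch-ia64.h
hunk below) and into arch_vmx together with the saved cause/opcode, so
the emulation paths no longer dig them out of the register frame.  The
consumers in this changeset follow one pattern (VMX() is the existing
accessor):

    cause = VMX(vcpu, cause);       /* vmx_emulate() */
    opcode = VMX(vcpu, opcode);
    misr.val = VMX(v, cr_isr);      /* vmx_hpw_miss() */

The producers are presumably the low-level handlers in vmx_ivt.S and
vmx_minstate.h earlier in this patch.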
    28.1 --- a/xen/include/asm-ia64/xenkregs.h	Sun Sep 18 12:18:57 2005 -0600
    28.2 +++ b/xen/include/asm-ia64/xenkregs.h	Mon Sep 19 11:08:20 2005 -0600
    28.3 @@ -8,13 +8,6 @@
    28.4  #define	IA64_TR_VHPT		4	/* dtr4: vhpt */
    28.5  #define IA64_TR_ARCH_INFO      5
    28.6  
    28.7 -#ifdef CONFIG_VTI
    28.8 -#define IA64_TR_VHPT_IN_DOM	5	/* dtr5: Double mapping for vhpt table in domain space */
    28.9 -#define IA64_TR_XEN_IN_DOM	6	/* itr6, dtr6: Double mapping for xen image in domain space */
   28.10 -#define IA64_TR_RR7_SWITCH_STUB	7	/* dtr7: mapping for rr7 switch stub */
   28.11 -#define IA64_TEMP_PHYSICAL	8	/* itr8, dtr8: temp mapping for guest physical memory 256M */
   28.12 -#endif // CONFIG_VTI
   28.13 -
   28.14  /* Processor status register bits: */
   28.15  #define IA64_PSR_VM_BIT		46
   28.16  #define IA64_PSR_VM	(__IA64_UL(1) << IA64_PSR_VM_BIT)
    29.1 --- a/xen/include/asm-ia64/xensystem.h	Sun Sep 18 12:18:57 2005 -0600
    29.2 +++ b/xen/include/asm-ia64/xensystem.h	Mon Sep 19 11:08:20 2005 -0600
    29.3 @@ -16,10 +16,6 @@
    29.4  /* Define HV space hierarchy */
    29.5  #define XEN_VIRT_SPACE_LOW	 0xe800000000000000
    29.6  #define XEN_VIRT_SPACE_HIGH	 0xf800000000000000	
    29.7 -/* This is address to mapping rr7 switch stub, in region 5 */
    29.8 -#ifdef CONFIG_VTI
    29.9 -#define XEN_RR7_SWITCH_STUB	 0xb700000000000000
   29.10 -#endif // CONFIG_VTI
   29.11  
   29.12  #define XEN_START_ADDR		 0xf000000000000000
   29.13  #define HYPERVISOR_VIRT_START	 0xf000000000000000
    30.1 --- a/xen/include/public/arch-ia64.h	Sun Sep 18 12:18:57 2005 -0600
    30.2 +++ b/xen/include/public/arch-ia64.h	Mon Sep 19 11:08:20 2005 -0600
    30.3 @@ -94,38 +94,22 @@ typedef struct cpu_user_regs{
    30.4  	unsigned long r14;		/* scratch */
    30.5  	unsigned long r2;		/* scratch */
    30.6  	unsigned long r3;		/* scratch */
    30.7 -
    30.8 -    union {
    30.9 -      struct {
   30.10 -    	/* The following registers are saved by SAVE_REST: */
   30.11 -	    unsigned long r16;		/* scratch */
   30.12 -    	unsigned long r17;		/* scratch */
   30.13 -	    unsigned long r18;		/* scratch */
   30.14 -    	unsigned long r19;		/* scratch */
   30.15 -	    unsigned long r20;		/* scratch */
   30.16 -    	unsigned long r21;		/* scratch */
   30.17 -    	unsigned long r22;		/* scratch */
   30.18 -    	unsigned long r23;		/* scratch */
   30.19 -    	unsigned long r24;		/* scratch */
   30.20 -    	unsigned long r25;		/* scratch */
   30.21 -    	unsigned long r26;		/* scratch */
   30.22 -    	unsigned long r27;		/* scratch */
   30.23 -    	unsigned long r28;		/* scratch */
   30.24 -    	unsigned long r29;		/* scratch */
   30.25 -    	unsigned long r30;		/* scratch */
   30.26 -    	unsigned long r31;		/* scratch */
   30.27 -      };
   30.28 -      struct {
   30.29 -    	unsigned long r4;		/* preserved */
   30.30 -	    unsigned long r5;		/* preserved */
   30.31 -    	unsigned long r6;		/* preserved */
   30.32 -	    unsigned long r7;		/* preserved */
   30.33 -    	unsigned long cr_iipa;   /* for emulation */
   30.34 -	    unsigned long cr_isr;    /* for emulation */
   30.35 -    	unsigned long eml_unat;    /* used for emulating instruction */
   30.36 -	    unsigned long rfi_pfs;     /* used for elulating rfi */
   30.37 -      };
   30.38 -    };
   30.39 +	unsigned long r16;		/* scratch */
   30.40 +	unsigned long r17;		/* scratch */
   30.41 +	unsigned long r18;		/* scratch */
   30.42 +	unsigned long r19;		/* scratch */
   30.43 +	unsigned long r20;		/* scratch */
   30.44 +	unsigned long r21;		/* scratch */
   30.45 +	unsigned long r22;		/* scratch */
   30.46 +	unsigned long r23;		/* scratch */
   30.47 +	unsigned long r24;		/* scratch */
   30.48 +	unsigned long r25;		/* scratch */
   30.49 +	unsigned long r26;		/* scratch */
   30.50 +	unsigned long r27;		/* scratch */
   30.51 +	unsigned long r28;		/* scratch */
   30.52 +	unsigned long r29;		/* scratch */
   30.53 +	unsigned long r30;		/* scratch */
   30.54 +	unsigned long r31;		/* scratch */
   30.55  	unsigned long ar_ccv;		/* compare/exchange value (scratch) */
   30.56  
   30.57  	/*
   30.58 @@ -137,6 +121,13 @@ typedef struct cpu_user_regs{
   30.59  	struct pt_fpreg f9;		/* scratch */
   30.60  	struct pt_fpreg f10;		/* scratch */
   30.61  	struct pt_fpreg f11;		/* scratch */
   30.62 +	unsigned long r4;		/* preserved */
   30.63 +	unsigned long r5;		/* preserved */
   30.64 +	unsigned long r6;		/* preserved */
   30.65 +	unsigned long r7;		/* preserved */
   30.66 +	unsigned long eml_unat;	/* used for emulating instructions */
   30.67 +	unsigned long rfi_pfs;	/* used for emulating rfi */
   30.68 +
   30.69  }cpu_user_regs_t;
   30.70  
   30.71  typedef union {
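
With the union above removed, r16-r31 and the trailing r4-r7, eml_unat and rfi_pfs sit at fixed offsets in cpu_user_regs for both domain types, so emulation code can presumably reach a guest's preserved registers straight from the saved frame. A sketch; set_guest_r4 is an invented name:

/* Illustrative only: r4 is now always part of the saved frame. */
static inline void set_guest_r4(cpu_user_regs_t *regs, unsigned long val)
{
	regs->r4 = val;		/* marked "preserved" in the layout above */
}
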