direct-io.hg

changeset 11986:78c494a16b95

[IA64] de-obfuscate vcpu.c and privop.c and related headers

De-obfuscate vcpu.c, privop.c and related header files to make them
easier to debug and read. In addition, eliminate the bogus UINT64 and UINT
data types, which exist only in the ia64 tree and are unnecessary since
u64 is already available.
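
As a rough illustration of the type change (the exact wording of the old
ia64-only definitions is assumed here, not quoted from the tree):

	/* ia64-tree-only aliases, assumed to have been roughly: */
	typedef unsigned long UINT64;	/* same 64-bit width as the generic u64 */
	typedef unsigned int  UINT;

	/* After the cleanup, code uses u64 (already provided by the common
	 * Xen headers) directly, e.g. the prototypes converted below:
	 *     IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 va);   becomes
	 *     IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, u64 va);
	 */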

Signed-off-by: Jes Sorensen <jes@sgi.com>
author awilliam@xenbuild.aw
date Tue Oct 17 15:43:41 2006 -0600 (2006-10-17)
parents c5ddcf89f050
children 06ed19691f6d
files xen/arch/ia64/vmx/pal_emul.c xen/arch/ia64/vmx/vlsapic.c xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vmx_phy_mode.c xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/vmx/vmx_vcpu.c xen/arch/ia64/vmx/vmx_virt.c xen/arch/ia64/xen/privop.c xen/arch/ia64/xen/vcpu.c xen/arch/ia64/xen/vhpt.c xen/include/asm-ia64/dom_fw.h xen/include/asm-ia64/privop.h xen/include/asm-ia64/vcpu.h xen/include/asm-ia64/vmx_pal_vsa.h xen/include/asm-ia64/vmx_phy_mode.h xen/include/asm-ia64/vmx_vcpu.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/pal_emul.c	Tue Oct 17 14:30:36 2006 -0600
     1.2 +++ b/xen/arch/ia64/vmx/pal_emul.c	Tue Oct 17 15:43:41 2006 -0600
     1.3 @@ -48,7 +48,7 @@
     1.4  	}
     1.5  
     1.6  static void
     1.7 -get_pal_parameters(VCPU *vcpu, UINT64 *gr29, UINT64 *gr30, UINT64 *gr31) {
     1.8 +get_pal_parameters(VCPU *vcpu, u64 *gr29, u64 *gr30, u64 *gr31) {
     1.9  
    1.10  	vcpu_get_gr_nat(vcpu,29,gr29);
    1.11  	vcpu_get_gr_nat(vcpu,30,gr30); 
    1.12 @@ -75,7 +75,7 @@ set_sal_result(VCPU *vcpu,struct sal_ret
    1.13  
    1.14  static struct ia64_pal_retval
    1.15  pal_cache_flush(VCPU *vcpu) {
    1.16 -	UINT64 gr28,gr29, gr30, gr31;
    1.17 +	u64 gr28,gr29, gr30, gr31;
    1.18  	struct ia64_pal_retval result;
    1.19  
    1.20  	get_pal_parameters(vcpu, &gr29, &gr30, &gr31);
    1.21 @@ -384,7 +384,7 @@ pal_vm_page_size(VCPU *vcpu) {
    1.22  
    1.23  void
    1.24  pal_emul(VCPU *vcpu) {
    1.25 -	UINT64 gr28;
    1.26 +	u64 gr28;
    1.27  	struct ia64_pal_retval result;
    1.28  
    1.29  	vcpu_get_gr_nat(vcpu,28,&gr28);  //bank1
     2.1 --- a/xen/arch/ia64/vmx/vlsapic.c	Tue Oct 17 14:30:36 2006 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vlsapic.c	Tue Oct 17 15:43:41 2006 -0600
     2.3 @@ -49,8 +49,8 @@
     2.4   * Update the checked last_itc.
     2.5   */
     2.6  
     2.7 -extern void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
     2.8 -     UINT64 vector,REGS *regs);
     2.9 +extern void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
    2.10 +                                     u64 vector, REGS *regs);
    2.11  static void update_last_itc(vtime_t *vtm, uint64_t cur_itc)
    2.12  {
    2.13      vtm->last_itc = cur_itc;
    2.14 @@ -533,7 +533,7 @@ int vmx_vcpu_pend_interrupt(VCPU *vcpu, 
    2.15   * The interrupt source is contained in pend_irr[0-3] with
    2.16   * each bits stand for one interrupt.
    2.17   */
    2.18 -void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu, UINT64 *pend_irr)
    2.19 +void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu, u64 *pend_irr)
    2.20  {
    2.21      uint64_t    spsr;
    2.22      int     i;
     3.1 --- a/xen/arch/ia64/vmx/vmmu.c	Tue Oct 17 14:30:36 2006 -0600
     3.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Tue Oct 17 15:43:41 2006 -0600
     3.3 @@ -363,7 +363,7 @@ fetch_code(VCPU *vcpu, u64 gip, IA64_BUN
     3.4      return IA64_NO_FAULT;
     3.5  }
     3.6  
     3.7 -IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
     3.8 +IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
     3.9  {
    3.10  #ifdef VTLB_DEBUG
    3.11      int slot;
    3.12 @@ -382,7 +382,7 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
    3.13      return IA64_NO_FAULT;
    3.14  }
    3.15  
    3.16 -IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
    3.17 +IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
    3.18  {
    3.19      u64 gpfn;
    3.20  #ifdef VTLB_DEBUG    
    3.21 @@ -478,7 +478,7 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
    3.22  
    3.23  
    3.24  
    3.25 -IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 ifa,UINT64 ps)
    3.26 +IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,u64 ifa, u64 ps)
    3.27  {
    3.28      int index;
    3.29      u64 va;
    3.30 @@ -491,7 +491,7 @@ IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT
    3.31      return IA64_NO_FAULT;
    3.32  }
    3.33  
    3.34 -IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 ifa,UINT64 ps)
    3.35 +IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, u64 ifa, u64 ps)
    3.36  {
    3.37      int index;
    3.38      u64 va;
    3.39 @@ -504,7 +504,7 @@ IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT
    3.40      return IA64_NO_FAULT;
    3.41  }
    3.42  
    3.43 -IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 va, UINT64 ps)
    3.44 +IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, u64 va, u64 ps)
    3.45  {
    3.46      va = PAGEALIGN(va, ps);
    3.47      thash_purge_entries(vcpu, va, ps);
    3.48 @@ -512,19 +512,19 @@ IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UIN
    3.49  }
    3.50  
    3.51  
    3.52 -IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 va)
    3.53 +IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, u64 va)
    3.54  {
    3.55      thash_purge_all(vcpu);
    3.56      return IA64_NO_FAULT;
    3.57  }
    3.58  
    3.59 -IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 va, UINT64 ps)
    3.60 +IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, u64 va, u64 ps)
    3.61  {
    3.62      vmx_vcpu_ptc_ga(vcpu, va, ps);
    3.63      return IA64_ILLOP_FAULT;
    3.64  }
    3.65  /*
    3.66 -IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
    3.67 +IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
    3.68  {
    3.69      vmx_vcpu_ptc_l(vcpu, va, ps);
    3.70      return IA64_NO_FAULT;
    3.71 @@ -562,7 +562,7 @@ static void ptc_ga_remote_func (void *va
    3.72  }
    3.73  
    3.74  
    3.75 -IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
    3.76 +IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
    3.77  {
    3.78  
    3.79      struct domain *d = vcpu->domain;
    3.80 @@ -596,7 +596,7 @@ IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UIN
    3.81  }
    3.82  
    3.83  
    3.84 -IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
    3.85 +IA64FAULT vmx_vcpu_thash(VCPU *vcpu, u64 vadr, u64 *pval)
    3.86  {
    3.87      PTA vpta;
    3.88      ia64_rr vrr;
    3.89 @@ -616,7 +616,7 @@ IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UIN
    3.90  }
    3.91  
    3.92  
    3.93 -IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
    3.94 +IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, u64 vadr, u64 *pval)
    3.95  {
    3.96      ia64_rr vrr;
    3.97      PTA vpta;
    3.98 @@ -632,7 +632,7 @@ IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT
    3.99  
   3.100  
   3.101  
   3.102 -IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
   3.103 +IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 vadr, u64 *padr)
   3.104  {
   3.105      thash_data_t *data;
   3.106      ISR visr,pt_isr;
   3.107 @@ -718,7 +718,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
   3.108      }
   3.109  }
   3.110  
   3.111 -IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
   3.112 +IA64FAULT vmx_vcpu_tak(VCPU *vcpu, u64 vadr, u64 *key)
   3.113  {
   3.114      thash_data_t *data;
   3.115      PTA vpta;
     4.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Tue Oct 17 14:30:36 2006 -0600
     4.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Tue Oct 17 15:43:41 2006 -0600
     4.3 @@ -204,7 +204,7 @@ vmx_load_all_rr(VCPU *vcpu)
     4.4  void
     4.5  switch_to_physical_rid(VCPU *vcpu)
     4.6  {
     4.7 -    UINT64 psr;
     4.8 +    u64 psr;
     4.9      /* Save original virtual mode rr[0] and rr[4] */
    4.10      psr=ia64_clear_ic();
    4.11      ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
    4.12 @@ -221,7 +221,7 @@ switch_to_physical_rid(VCPU *vcpu)
    4.13  void
    4.14  switch_to_virtual_rid(VCPU *vcpu)
    4.15  {
    4.16 -    UINT64 psr;
    4.17 +    u64 psr;
    4.18      psr=ia64_clear_ic();
    4.19      ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
    4.20      ia64_srlz_d();
     5.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Tue Oct 17 14:30:36 2006 -0600
     5.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Tue Oct 17 15:43:41 2006 -0600
     5.3 @@ -66,7 +66,7 @@ extern unsigned long handle_fpu_swa (int
     5.4  #define DOMN_PAL_REQUEST    0x110000
     5.5  #define DOMN_SAL_REQUEST    0x110001
     5.6  
     5.7 -static UINT64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000, 0x1400,0x1800,
     5.8 +static u64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000,0x1400,0x1800,
     5.9      0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000,
    5.10      0x4400,0x4800,0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600,
    5.11      0x5700,0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000,
    5.12 @@ -78,12 +78,12 @@ static UINT64 vec2off[68] = {0x0,0x400,0
    5.13  
    5.14  
    5.15  
    5.16 -void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
    5.17 -     UINT64 vector,REGS *regs)
    5.18 +void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
    5.19 +                              u64 vector, REGS *regs)
    5.20  {
    5.21 -    UINT64 status;
    5.22 +    u64 status;
    5.23      VCPU *vcpu = current;
    5.24 -    UINT64 vpsr = VCPU(vcpu, vpsr);
    5.25 +    u64 vpsr = VCPU(vcpu, vpsr);
    5.26      vector=vec2off[vector];
    5.27      if(!(vpsr&IA64_PSR_IC)&&(vector!=IA64_DATA_NESTED_TLB_VECTOR)){
    5.28          panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
    5.29 @@ -253,7 +253,7 @@ void leave_hypervisor_tail(struct pt_reg
    5.30      }
    5.31  }
    5.32  
    5.33 -extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
    5.34 +extern ia64_rr vmx_vcpu_rr(VCPU *vcpu, u64 vadr);
    5.35  
    5.36  static int vmx_handle_lds(REGS* regs)
    5.37  {
     6.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Tue Oct 17 14:30:36 2006 -0600
     6.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Tue Oct 17 15:43:41 2006 -0600
     6.3 @@ -82,7 +82,7 @@ void
     6.4  vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
     6.5  {
     6.6  
     6.7 -    UINT64 mask;
     6.8 +    u64 mask;
     6.9      REGS *regs;
    6.10      IA64_PSR old_psr, new_psr;
    6.11      old_psr.val=VCPU(vcpu, vpsr);
    6.12 @@ -208,7 +208,7 @@ vmx_vcpu_get_plat(VCPU *vcpu)
    6.13  
    6.14  
    6.15  
    6.16 -IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
    6.17 +IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
    6.18  {
    6.19      ia64_rr oldrr,newrr;
    6.20      extern void * pal_vaddr;
    6.21 @@ -252,14 +252,14 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UI
    6.22   VCPU protection key register access routines
    6.23  **************************************************************************/
    6.24  
    6.25 -IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
    6.26 +IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, u64 reg, u64 *pval)
    6.27  {
    6.28 -    UINT64 val = (UINT64)ia64_get_pkr(reg);
    6.29 +    u64 val = (u64)ia64_get_pkr(reg);
    6.30      *pval = val;
    6.31      return (IA64_NO_FAULT);
    6.32  }
    6.33  
    6.34 -IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
    6.35 +IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, u64 reg, u64 val)
    6.36  {
    6.37      ia64_set_pkr(reg,val);
    6.38      return (IA64_NO_FAULT);
    6.39 @@ -295,7 +295,7 @@ u64 vmx_vcpu_get_itir_on_fault(VCPU *vcp
    6.40  IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
    6.41  {
    6.42      // TODO: Only allowed for current vcpu
    6.43 -    UINT64 ifs, psr;
    6.44 +    u64 ifs, psr;
    6.45      REGS *regs = vcpu_regs(vcpu);
    6.46      psr = VCPU(vcpu,ipsr);
    6.47      if (psr & IA64_PSR_BN)
    6.48 @@ -313,7 +313,7 @@ IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
    6.49  
    6.50  #if 0
    6.51  IA64FAULT
    6.52 -vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val)
    6.53 +vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, u64 *val)
    6.54  {
    6.55      IA64_PSR vpsr;
    6.56  
    6.57 @@ -366,7 +366,7 @@ vmx_vcpu_set_bgr(VCPU *vcpu, unsigned in
    6.58  #endif
    6.59  #if 0
    6.60  IA64FAULT
    6.61 -vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val)
    6.62 +vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, u64 * val)
    6.63  {
    6.64      REGS *regs=vcpu_regs(vcpu);
    6.65      int nat;
    6.66 @@ -413,18 +413,18 @@ vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg
    6.67      This function gets guest PSR
    6.68   */
    6.69  
    6.70 -UINT64 vmx_vcpu_get_psr(VCPU *vcpu)
    6.71 +u64 vmx_vcpu_get_psr(VCPU *vcpu)
    6.72  {
    6.73 -    UINT64 mask;
    6.74 +    u64 mask;
    6.75      REGS *regs = vcpu_regs(vcpu);
    6.76      mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
    6.77             IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
    6.78      return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
    6.79  }
    6.80  
    6.81 -IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
    6.82 +IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, u64 imm24)
    6.83  {
    6.84 -    UINT64 vpsr;
    6.85 +    u64 vpsr;
    6.86      vpsr = vmx_vcpu_get_psr(vcpu);
    6.87      vpsr &= (~imm24);
    6.88      vmx_vcpu_set_psr(vcpu, vpsr);
    6.89 @@ -432,9 +432,9 @@ IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vc
    6.90  }
    6.91  
    6.92  
    6.93 -IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
    6.94 +IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, u64 imm24)
    6.95  {
    6.96 -    UINT64 vpsr;
    6.97 +    u64 vpsr;
    6.98      vpsr = vmx_vcpu_get_psr(vcpu);
    6.99      vpsr |= imm24;
   6.100      vmx_vcpu_set_psr(vcpu, vpsr);
   6.101 @@ -442,7 +442,7 @@ IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu
   6.102  }
   6.103  
   6.104  
   6.105 -IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
   6.106 +IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, u64 val)
   6.107  {
   6.108      val = (val & MASK(0, 32)) | (vmx_vcpu_get_psr(vcpu) & MASK(32, 32));
   6.109      vmx_vcpu_set_psr(vcpu, val);
     7.1 --- a/xen/arch/ia64/vmx/vmx_virt.c	Tue Oct 17 14:30:36 2006 -0600
     7.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c	Tue Oct 17 15:43:41 2006 -0600
     7.3 @@ -32,7 +32,7 @@
     7.4  #include <asm/vmx_phy_mode.h>
     7.5  
     7.6  void
     7.7 -ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, UINT64  * cause)
     7.8 +ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, u64 * cause)
     7.9  {
    7.10      *cause=0;
    7.11      switch (slot_type) {
    7.12 @@ -144,20 +144,20 @@ ia64_priv_decoder(IA64_SLOT_TYPE slot_ty
    7.13  
    7.14  IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
    7.15  {
    7.16 -    UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
    7.17 +    u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
    7.18      return vmx_vcpu_reset_psr_sm(vcpu,imm24);
    7.19  }
    7.20  
    7.21  IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
    7.22  {
    7.23 -    UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
    7.24 +    u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
    7.25      return vmx_vcpu_set_psr_sm(vcpu,imm24);
    7.26  }
    7.27  
    7.28  IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
    7.29  {
    7.30 -    UINT64 tgt = inst.M33.r1;
    7.31 -    UINT64 val;
    7.32 +    u64 tgt = inst.M33.r1;
    7.33 +    u64 val;
    7.34  
    7.35  /*
    7.36      if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
    7.37 @@ -174,7 +174,7 @@ IA64FAULT vmx_emul_mov_from_psr(VCPU *vc
    7.38   */
    7.39  IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
    7.40  {
    7.41 -    UINT64 val;
    7.42 +    u64 val;
    7.43  
    7.44      if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
    7.45  	panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");
    7.46 @@ -566,7 +566,7 @@ IA64FAULT vmx_emul_tak(VCPU *vcpu, INST6
    7.47  
    7.48  IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
    7.49  {
    7.50 -    UINT64 itir, ifa, pte, slot;
    7.51 +    u64 itir, ifa, pte, slot;
    7.52  #ifdef  VMAL_NO_FAULT_CHECK
    7.53      IA64_PSR  vpsr;
    7.54      vpsr.val=vmx_vcpu_get_psr(vcpu);
    7.55 @@ -623,7 +623,7 @@ IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INS
    7.56  
    7.57  IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
    7.58  {
    7.59 -    UINT64 itir, ifa, pte, slot;
    7.60 +    u64 itir, ifa, pte, slot;
    7.61  #ifdef  VMAL_NO_FAULT_CHECK
    7.62      ISR isr;
    7.63      IA64_PSR  vpsr;
    7.64 @@ -691,7 +691,7 @@ IA64FAULT itc_fault_check(VCPU *vcpu, IN
    7.65          return IA64_FAULT;
    7.66      }
    7.67  
    7.68 -    UINT64 fault;
    7.69 +    u64 fault;
    7.70      ISR isr;
    7.71      if ( vpsr.cpl != 0) {
    7.72          /* Inject Privileged Operation fault into guest */
    7.73 @@ -729,7 +729,7 @@ IA64FAULT itc_fault_check(VCPU *vcpu, IN
    7.74  
    7.75  IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
    7.76  {
    7.77 -    UINT64 itir, ifa, pte;
    7.78 +    u64 itir, ifa, pte;
    7.79  
    7.80      if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
    7.81      	return IA64_FAULT;
    7.82 @@ -740,7 +740,7 @@ IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INS
    7.83  
    7.84  IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
    7.85  {
    7.86 -    UINT64 itir, ifa, pte;
    7.87 +    u64 itir, ifa, pte;
    7.88  
    7.89      if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
    7.90      	return IA64_FAULT;
    7.91 @@ -757,7 +757,7 @@ IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INS
    7.92  IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
    7.93  {
    7.94      // I27 and M30 are identical for these fields
    7.95 -    UINT64  imm;
    7.96 +    u64 imm;
    7.97  
    7.98      if(inst.M30.ar3!=44){
    7.99          panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
   7.100 @@ -1277,8 +1277,8 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu,
   7.101  
   7.102  IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
   7.103  {
   7.104 -    UINT64 tgt = inst.M33.r1;
   7.105 -    UINT64 val;
   7.106 +    u64 tgt = inst.M33.r1;
   7.107 +    u64 val;
   7.108      IA64FAULT fault;
   7.109  #ifdef  CHECK_FAULT
   7.110      IA64_PSR vpsr;
   7.111 @@ -1353,7 +1353,7 @@ vmx_emulate(VCPU *vcpu, REGS *regs)
   7.112  {
   7.113      IA64FAULT status;
   7.114      INST64 inst;
   7.115 -    UINT64 iip, cause, opcode;
   7.116 +    u64 iip, cause, opcode;
   7.117      iip = regs->cr_iip;
   7.118      cause = VMX(vcpu,cause);
   7.119      opcode = VMX(vcpu,opcode);
     8.1 --- a/xen/arch/ia64/xen/privop.c	Tue Oct 17 14:30:36 2006 -0600
     8.2 +++ b/xen/arch/ia64/xen/privop.c	Tue Oct 17 15:43:41 2006 -0600
     8.3 @@ -9,13 +9,13 @@
     8.4  #include <asm/privop.h>
     8.5  #include <asm/vcpu.h>
     8.6  #include <asm/processor.h>
     8.7 -#include <asm/delay.h>	// Debug only
     8.8 +#include <asm/delay.h>		// Debug only
     8.9  #include <asm/dom_fw.h>
    8.10  #include <asm/vhpt.h>
    8.11  #include <asm/bundle.h>
    8.12  #include <xen/perfc.h>
    8.13  
    8.14 -long priv_verbose=0;
    8.15 +long priv_verbose = 0;
    8.16  unsigned long privop_trace = 0;
    8.17  
    8.18  /* Set to 1 to handle privified instructions from the privify tool. */
    8.19 @@ -29,200 +29,205 @@ static const int privify_en = 1;
    8.20  Privileged operation emulation routines
    8.21  **************************************************************************/
    8.22  
    8.23 -static IA64FAULT priv_rfi(VCPU *vcpu, INST64 inst)
    8.24 +static IA64FAULT priv_rfi(VCPU * vcpu, INST64 inst)
    8.25  {
    8.26  	return vcpu_rfi(vcpu);
    8.27  }
    8.28  
    8.29 -static IA64FAULT priv_bsw0(VCPU *vcpu, INST64 inst)
    8.30 +static IA64FAULT priv_bsw0(VCPU * vcpu, INST64 inst)
    8.31  {
    8.32  	return vcpu_bsw0(vcpu);
    8.33  }
    8.34  
    8.35 -static IA64FAULT priv_bsw1(VCPU *vcpu, INST64 inst)
    8.36 +static IA64FAULT priv_bsw1(VCPU * vcpu, INST64 inst)
    8.37  {
    8.38  	return vcpu_bsw1(vcpu);
    8.39  }
    8.40  
    8.41 -static IA64FAULT priv_cover(VCPU *vcpu, INST64 inst)
    8.42 +static IA64FAULT priv_cover(VCPU * vcpu, INST64 inst)
    8.43  {
    8.44  	return vcpu_cover(vcpu);
    8.45  }
    8.46  
    8.47 -static IA64FAULT priv_ptc_l(VCPU *vcpu, INST64 inst)
    8.48 +static IA64FAULT priv_ptc_l(VCPU * vcpu, INST64 inst)
    8.49  {
    8.50 -	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
    8.51 -	UINT64 log_range;
    8.52 +	u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
    8.53 +	u64 log_range;
    8.54  
    8.55 -	log_range = ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
    8.56 -	return vcpu_ptc_l(vcpu,vadr,log_range);
    8.57 +	log_range = ((vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2);
    8.58 +	return vcpu_ptc_l(vcpu, vadr, log_range);
    8.59  }
    8.60  
    8.61 -static IA64FAULT priv_ptc_e(VCPU *vcpu, INST64 inst)
    8.62 +static IA64FAULT priv_ptc_e(VCPU * vcpu, INST64 inst)
    8.63  {
    8.64 -	UINT src = inst.M28.r3;
    8.65 +	unsigned int src = inst.M28.r3;
    8.66  
    8.67  	// NOTE: ptc_e with source gr > 63 is emulated as a fc r(y-64)
    8.68  	if (privify_en && src > 63)
    8.69 -		return(vcpu_fc(vcpu,vcpu_get_gr(vcpu,src - 64)));
    8.70 -	return vcpu_ptc_e(vcpu,vcpu_get_gr(vcpu,src));
    8.71 +		return vcpu_fc(vcpu, vcpu_get_gr(vcpu, src - 64));
    8.72 +	return vcpu_ptc_e(vcpu, vcpu_get_gr(vcpu, src));
    8.73  }
    8.74  
    8.75 -static IA64FAULT priv_ptc_g(VCPU *vcpu, INST64 inst)
    8.76 +static IA64FAULT priv_ptc_g(VCPU * vcpu, INST64 inst)
    8.77  {
    8.78 -	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
    8.79 -	UINT64 addr_range;
    8.80 +	u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
    8.81 +	u64 addr_range;
    8.82  
    8.83 -	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
    8.84 -	return vcpu_ptc_g(vcpu,vadr,addr_range);
    8.85 +	addr_range = 1 << ((vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2);
    8.86 +	return vcpu_ptc_g(vcpu, vadr, addr_range);
    8.87  }
    8.88  
    8.89 -static IA64FAULT priv_ptc_ga(VCPU *vcpu, INST64 inst)
    8.90 +static IA64FAULT priv_ptc_ga(VCPU * vcpu, INST64 inst)
    8.91  {
    8.92 -	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
    8.93 -	UINT64 addr_range;
    8.94 +	u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
    8.95 +	u64 addr_range;
    8.96  
    8.97 -	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
    8.98 -	return vcpu_ptc_ga(vcpu,vadr,addr_range);
    8.99 +	addr_range = 1 << ((vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2);
   8.100 +	return vcpu_ptc_ga(vcpu, vadr, addr_range);
   8.101  }
   8.102  
   8.103 -static IA64FAULT priv_ptr_d(VCPU *vcpu, INST64 inst)
   8.104 +static IA64FAULT priv_ptr_d(VCPU * vcpu, INST64 inst)
   8.105  {
   8.106 -	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
   8.107 -	UINT64 log_range;
   8.108 +	u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
   8.109 +	u64 log_range;
   8.110  
   8.111 -	log_range = (vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2;
   8.112 -	return vcpu_ptr_d(vcpu,vadr,log_range);
   8.113 +	log_range = (vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2;
   8.114 +	return vcpu_ptr_d(vcpu, vadr, log_range);
   8.115  }
   8.116  
   8.117 -static IA64FAULT priv_ptr_i(VCPU *vcpu, INST64 inst)
   8.118 +static IA64FAULT priv_ptr_i(VCPU * vcpu, INST64 inst)
   8.119  {
   8.120 -	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
   8.121 -	UINT64 log_range;
   8.122 +	u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
   8.123 +	u64 log_range;
   8.124  
   8.125 -	log_range = (vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2;
   8.126 -	return vcpu_ptr_i(vcpu,vadr,log_range);
   8.127 +	log_range = (vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2;
   8.128 +	return vcpu_ptr_i(vcpu, vadr, log_range);
   8.129  }
   8.130  
   8.131 -static IA64FAULT priv_tpa(VCPU *vcpu, INST64 inst)
   8.132 +static IA64FAULT priv_tpa(VCPU * vcpu, INST64 inst)
   8.133  {
   8.134 -	UINT64 padr;
   8.135 -	UINT fault;
   8.136 -	UINT src = inst.M46.r3;
   8.137 +	u64 padr;
   8.138 +	unsigned int fault;
   8.139 +	unsigned int src = inst.M46.r3;
   8.140  
   8.141  	// NOTE: tpa with source gr > 63 is emulated as a ttag rx=r(y-64)
   8.142  	if (privify_en && src > 63)
   8.143 -		fault = vcpu_ttag(vcpu,vcpu_get_gr(vcpu,src-64),&padr);
   8.144 -	else fault = vcpu_tpa(vcpu,vcpu_get_gr(vcpu,src),&padr);
   8.145 +		fault = vcpu_ttag(vcpu, vcpu_get_gr(vcpu, src - 64), &padr);
   8.146 +	else
   8.147 +		fault = vcpu_tpa(vcpu, vcpu_get_gr(vcpu, src), &padr);
   8.148  	if (fault == IA64_NO_FAULT)
   8.149  		return vcpu_set_gr(vcpu, inst.M46.r1, padr, 0);
   8.150 -	else return fault;
   8.151 +	else
   8.152 +		return fault;
   8.153  }
   8.154  
   8.155 -static IA64FAULT priv_tak(VCPU *vcpu, INST64 inst)
   8.156 +static IA64FAULT priv_tak(VCPU * vcpu, INST64 inst)
   8.157  {
   8.158 -	UINT64 key;
   8.159 -	UINT fault;
   8.160 -	UINT src = inst.M46.r3;
   8.161 +	u64 key;
   8.162 +	unsigned int fault;
   8.163 +	unsigned int src = inst.M46.r3;
   8.164  
   8.165  	// NOTE: tak with source gr > 63 is emulated as a thash rx=r(y-64)
   8.166  	if (privify_en && src > 63)
   8.167 -		fault = vcpu_thash(vcpu,vcpu_get_gr(vcpu,src-64),&key);
   8.168 -	else fault = vcpu_tak(vcpu,vcpu_get_gr(vcpu,src),&key);
   8.169 +		fault = vcpu_thash(vcpu, vcpu_get_gr(vcpu, src - 64), &key);
   8.170 +	else
   8.171 +		fault = vcpu_tak(vcpu, vcpu_get_gr(vcpu, src), &key);
   8.172  	if (fault == IA64_NO_FAULT)
   8.173 -		return vcpu_set_gr(vcpu, inst.M46.r1, key,0);
   8.174 -	else return fault;
   8.175 +		return vcpu_set_gr(vcpu, inst.M46.r1, key, 0);
   8.176 +	else
   8.177 +		return fault;
   8.178  }
   8.179  
   8.180  /************************************
   8.181   * Insert translation register/cache
   8.182  ************************************/
   8.183  
   8.184 -static IA64FAULT priv_itr_d(VCPU *vcpu, INST64 inst)
   8.185 +static IA64FAULT priv_itr_d(VCPU * vcpu, INST64 inst)
   8.186  {
   8.187 -	UINT64 fault, itir, ifa, pte, slot;
   8.188 +	u64 fault, itir, ifa, pte, slot;
   8.189  
   8.190 -	//if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
   8.191 -	if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
   8.192 -		return(IA64_ILLOP_FAULT);
   8.193 -	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
   8.194 -		return(IA64_ILLOP_FAULT);
   8.195 -	pte = vcpu_get_gr(vcpu,inst.M42.r2);
   8.196 -	slot = vcpu_get_gr(vcpu,inst.M42.r3);
   8.197 +	//if (!vcpu_get_psr_ic(vcpu))
   8.198 +	//      return IA64_ILLOP_FAULT;
   8.199 +	if ((fault = vcpu_get_itir(vcpu, &itir)) != IA64_NO_FAULT)
   8.200 +		return IA64_ILLOP_FAULT;
   8.201 +	if ((fault = vcpu_get_ifa(vcpu, &ifa)) != IA64_NO_FAULT)
   8.202 +		return IA64_ILLOP_FAULT;
   8.203 +	pte = vcpu_get_gr(vcpu, inst.M42.r2);
   8.204 +	slot = vcpu_get_gr(vcpu, inst.M42.r3);
   8.205  
   8.206 -	return (vcpu_itr_d(vcpu,slot,pte,itir,ifa));
   8.207 +	return vcpu_itr_d(vcpu, slot, pte, itir, ifa);
   8.208  }
   8.209  
   8.210 -static IA64FAULT priv_itr_i(VCPU *vcpu, INST64 inst)
   8.211 +static IA64FAULT priv_itr_i(VCPU * vcpu, INST64 inst)
   8.212  {
   8.213 -	UINT64 fault, itir, ifa, pte, slot;
   8.214 +	u64 fault, itir, ifa, pte, slot;
   8.215  
   8.216 -	//if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
   8.217 -	if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
   8.218 -		return(IA64_ILLOP_FAULT);
   8.219 -	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
   8.220 -		return(IA64_ILLOP_FAULT);
   8.221 -	pte = vcpu_get_gr(vcpu,inst.M42.r2);
   8.222 -	slot = vcpu_get_gr(vcpu,inst.M42.r3);
   8.223 +	//if (!vcpu_get_psr_ic(vcpu)) return IA64_ILLOP_FAULT;
   8.224 +	if ((fault = vcpu_get_itir(vcpu, &itir)) != IA64_NO_FAULT)
   8.225 +		return IA64_ILLOP_FAULT;
   8.226 +	if ((fault = vcpu_get_ifa(vcpu, &ifa)) != IA64_NO_FAULT)
   8.227 +		return IA64_ILLOP_FAULT;
   8.228 +	pte = vcpu_get_gr(vcpu, inst.M42.r2);
   8.229 +	slot = vcpu_get_gr(vcpu, inst.M42.r3);
   8.230  
   8.231 -	return (vcpu_itr_i(vcpu,slot,pte,itir,ifa));
   8.232 +	return vcpu_itr_i(vcpu, slot, pte, itir, ifa);
   8.233  }
   8.234  
   8.235 -static IA64FAULT priv_itc_d(VCPU *vcpu, INST64 inst)
   8.236 +static IA64FAULT priv_itc_d(VCPU * vcpu, INST64 inst)
   8.237  {
   8.238 -	UINT64 fault, itir, ifa, pte;
   8.239 +	u64 fault, itir, ifa, pte;
   8.240  
   8.241 -	//if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
   8.242 -	if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
   8.243 -		return(IA64_ILLOP_FAULT);
   8.244 -	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
   8.245 -		return(IA64_ILLOP_FAULT);
   8.246 -	pte = vcpu_get_gr(vcpu,inst.M41.r2);
   8.247 +	//if (!vcpu_get_psr_ic(vcpu)) return IA64_ILLOP_FAULT;
   8.248 +	if ((fault = vcpu_get_itir(vcpu, &itir)) != IA64_NO_FAULT)
   8.249 +		return IA64_ILLOP_FAULT;
   8.250 +	if ((fault = vcpu_get_ifa(vcpu, &ifa)) != IA64_NO_FAULT)
   8.251 +		return IA64_ILLOP_FAULT;
   8.252 +	pte = vcpu_get_gr(vcpu, inst.M41.r2);
   8.253  
   8.254 -	return (vcpu_itc_d(vcpu,pte,itir,ifa));
   8.255 +	return vcpu_itc_d(vcpu, pte, itir, ifa);
   8.256  }
   8.257  
   8.258 -static IA64FAULT priv_itc_i(VCPU *vcpu, INST64 inst)
   8.259 +static IA64FAULT priv_itc_i(VCPU * vcpu, INST64 inst)
   8.260  {
   8.261 -	UINT64 fault, itir, ifa, pte;
   8.262 +	u64 fault, itir, ifa, pte;
   8.263  
   8.264 -	//if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
   8.265 -	if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
   8.266 -		return(IA64_ILLOP_FAULT);
   8.267 -	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
   8.268 -		return(IA64_ILLOP_FAULT);
   8.269 -	pte = vcpu_get_gr(vcpu,inst.M41.r2);
   8.270 +	//if (!vcpu_get_psr_ic(vcpu)) return IA64_ILLOP_FAULT;
   8.271 +	if ((fault = vcpu_get_itir(vcpu, &itir)) != IA64_NO_FAULT)
   8.272 +		return IA64_ILLOP_FAULT;
   8.273 +	if ((fault = vcpu_get_ifa(vcpu, &ifa)) != IA64_NO_FAULT)
   8.274 +		return IA64_ILLOP_FAULT;
   8.275 +	pte = vcpu_get_gr(vcpu, inst.M41.r2);
   8.276  
   8.277 -	return (vcpu_itc_i(vcpu,pte,itir,ifa));
   8.278 +	return vcpu_itc_i(vcpu, pte, itir, ifa);
   8.279  }
   8.280  
   8.281  /*************************************
   8.282   * Moves to semi-privileged registers
   8.283  *************************************/
   8.284  
   8.285 -static IA64FAULT priv_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
   8.286 +static IA64FAULT priv_mov_to_ar_imm(VCPU * vcpu, INST64 inst)
   8.287  {
   8.288  	// I27 and M30 are identical for these fields
   8.289 -	UINT64 ar3 = inst.M30.ar3;
   8.290 -	UINT64 imm = vcpu_get_gr(vcpu,inst.M30.imm);
   8.291 -	return (vcpu_set_ar(vcpu,ar3,imm));
   8.292 +	u64 ar3 = inst.M30.ar3;
   8.293 +	u64 imm = vcpu_get_gr(vcpu, inst.M30.imm);
   8.294 +	return vcpu_set_ar(vcpu, ar3, imm);
   8.295  }
   8.296  
   8.297 -static IA64FAULT priv_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
   8.298 +static IA64FAULT priv_mov_to_ar_reg(VCPU * vcpu, INST64 inst)
   8.299  {
   8.300  	// I26 and M29 are identical for these fields
   8.301 -	UINT64 ar3 = inst.M29.ar3;
   8.302 +	u64 ar3 = inst.M29.ar3;
   8.303  
   8.304  	if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8) {
   8.305  		// privified mov from kr
   8.306 -		UINT64 val;
   8.307 -		if (vcpu_get_ar(vcpu,ar3,&val) != IA64_ILLOP_FAULT)
   8.308 -			return vcpu_set_gr(vcpu, inst.M29.r2-64, val,0);
   8.309 -		else return IA64_ILLOP_FAULT;
   8.310 -	}
   8.311 -	else {
   8.312 -		UINT64 r2 = vcpu_get_gr(vcpu,inst.M29.r2);
   8.313 -		return (vcpu_set_ar(vcpu,ar3,r2));
   8.314 +		u64 val;
   8.315 +		if (vcpu_get_ar(vcpu, ar3, &val) != IA64_ILLOP_FAULT)
   8.316 +			return vcpu_set_gr(vcpu, inst.M29.r2 - 64, val, 0);
   8.317 +		else
   8.318 +			return IA64_ILLOP_FAULT;
   8.319 +	} else {
   8.320 +		u64 r2 = vcpu_get_gr(vcpu, inst.M29.r2);
   8.321 +		return vcpu_set_ar(vcpu, ar3, r2);
   8.322  	}
   8.323  }
   8.324  
   8.325 @@ -230,177 +235,205 @@ static IA64FAULT priv_mov_to_ar_reg(VCPU
   8.326   * Moves to privileged registers
   8.327  ********************************/
   8.328  
   8.329 -static IA64FAULT priv_mov_to_pkr(VCPU *vcpu, INST64 inst)
   8.330 +static IA64FAULT priv_mov_to_pkr(VCPU * vcpu, INST64 inst)
   8.331  {
   8.332 -	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   8.333 -	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   8.334 -	return (vcpu_set_pkr(vcpu,r3,r2));
   8.335 +	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
   8.336 +	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
   8.337 +	return vcpu_set_pkr(vcpu, r3, r2);
   8.338  }
   8.339  
   8.340 -static IA64FAULT priv_mov_to_rr(VCPU *vcpu, INST64 inst)
   8.341 +static IA64FAULT priv_mov_to_rr(VCPU * vcpu, INST64 inst)
   8.342  {
   8.343 -	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   8.344 -	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   8.345 -	return (vcpu_set_rr(vcpu,r3,r2));
   8.346 +	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
   8.347 +	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
   8.348 +	return vcpu_set_rr(vcpu, r3, r2);
   8.349  }
   8.350  
   8.351 -static IA64FAULT priv_mov_to_dbr(VCPU *vcpu, INST64 inst)
   8.352 +static IA64FAULT priv_mov_to_dbr(VCPU * vcpu, INST64 inst)
   8.353  {
   8.354 -	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   8.355 -	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   8.356 -	return (vcpu_set_dbr(vcpu,r3,r2));
   8.357 +	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
   8.358 +	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
   8.359 +	return vcpu_set_dbr(vcpu, r3, r2);
   8.360  }
   8.361  
   8.362 -static IA64FAULT priv_mov_to_ibr(VCPU *vcpu, INST64 inst)
   8.363 +static IA64FAULT priv_mov_to_ibr(VCPU * vcpu, INST64 inst)
   8.364  {
   8.365 -	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   8.366 -	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   8.367 -	return (vcpu_set_ibr(vcpu,r3,r2));
   8.368 +	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
   8.369 +	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
   8.370 +	return vcpu_set_ibr(vcpu, r3, r2);
   8.371  }
   8.372  
   8.373 -static IA64FAULT priv_mov_to_pmc(VCPU *vcpu, INST64 inst)
   8.374 +static IA64FAULT priv_mov_to_pmc(VCPU * vcpu, INST64 inst)
   8.375  {
   8.376 -	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   8.377 -	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   8.378 -	return (vcpu_set_pmc(vcpu,r3,r2));
   8.379 +	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
   8.380 +	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
   8.381 +	return vcpu_set_pmc(vcpu, r3, r2);
   8.382 +}
   8.383 +
   8.384 +static IA64FAULT priv_mov_to_pmd(VCPU * vcpu, INST64 inst)
   8.385 +{
   8.386 +	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
   8.387 +	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
   8.388 +	return vcpu_set_pmd(vcpu, r3, r2);
   8.389  }
   8.390  
   8.391 -static IA64FAULT priv_mov_to_pmd(VCPU *vcpu, INST64 inst)
   8.392 +static IA64FAULT priv_mov_to_cr(VCPU * vcpu, INST64 inst)
   8.393  {
   8.394 -	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   8.395 -	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   8.396 -	return (vcpu_set_pmd(vcpu,r3,r2));
   8.397 -}
   8.398 -
   8.399 -static IA64FAULT priv_mov_to_cr(VCPU *vcpu, INST64 inst)
   8.400 -{
   8.401 -	UINT64 val = vcpu_get_gr(vcpu, inst.M32.r2);
   8.402 +	u64 val = vcpu_get_gr(vcpu, inst.M32.r2);
   8.403  	perfc_incra(mov_to_cr, inst.M32.cr3);
   8.404  	switch (inst.M32.cr3) {
   8.405 -	    case 0: return vcpu_set_dcr(vcpu,val);
   8.406 -	    case 1: return vcpu_set_itm(vcpu,val);
   8.407 -	    case 2: return vcpu_set_iva(vcpu,val);
   8.408 -	    case 8: return vcpu_set_pta(vcpu,val);
   8.409 -	    case 16:return vcpu_set_ipsr(vcpu,val);
   8.410 -	    case 17:return vcpu_set_isr(vcpu,val);
   8.411 -	    case 19:return vcpu_set_iip(vcpu,val);
   8.412 -	    case 20:return vcpu_set_ifa(vcpu,val);
   8.413 -	    case 21:return vcpu_set_itir(vcpu,val);
   8.414 -	    case 22:return vcpu_set_iipa(vcpu,val);
   8.415 -	    case 23:return vcpu_set_ifs(vcpu,val);
   8.416 -	    case 24:return vcpu_set_iim(vcpu,val);
   8.417 -	    case 25:return vcpu_set_iha(vcpu,val);
   8.418 -	    case 64:return vcpu_set_lid(vcpu,val);
   8.419 -	    case 65:return IA64_ILLOP_FAULT;
   8.420 -	    case 66:return vcpu_set_tpr(vcpu,val);
   8.421 -	    case 67:return vcpu_set_eoi(vcpu,val);
   8.422 -	    case 68:return IA64_ILLOP_FAULT;
   8.423 -	    case 69:return IA64_ILLOP_FAULT;
   8.424 -	    case 70:return IA64_ILLOP_FAULT;
   8.425 -	    case 71:return IA64_ILLOP_FAULT;
   8.426 -	    case 72:return vcpu_set_itv(vcpu,val);
   8.427 -	    case 73:return vcpu_set_pmv(vcpu,val);
   8.428 -	    case 74:return vcpu_set_cmcv(vcpu,val);
   8.429 -	    case 80:return vcpu_set_lrr0(vcpu,val);
   8.430 -	    case 81:return vcpu_set_lrr1(vcpu,val);
   8.431 -	    default: return IA64_ILLOP_FAULT;
   8.432 +	case 0:
   8.433 +		return vcpu_set_dcr(vcpu, val);
   8.434 +	case 1:
   8.435 +		return vcpu_set_itm(vcpu, val);
   8.436 +	case 2:
   8.437 +		return vcpu_set_iva(vcpu, val);
   8.438 +	case 8:
   8.439 +		return vcpu_set_pta(vcpu, val);
   8.440 +	case 16:
   8.441 +		return vcpu_set_ipsr(vcpu, val);
   8.442 +	case 17:
   8.443 +		return vcpu_set_isr(vcpu, val);
   8.444 +	case 19:
   8.445 +		return vcpu_set_iip(vcpu, val);
   8.446 +	case 20:
   8.447 +		return vcpu_set_ifa(vcpu, val);
   8.448 +	case 21:
   8.449 +		return vcpu_set_itir(vcpu, val);
   8.450 +	case 22:
   8.451 +		return vcpu_set_iipa(vcpu, val);
   8.452 +	case 23:
   8.453 +		return vcpu_set_ifs(vcpu, val);
   8.454 +	case 24:
   8.455 +		return vcpu_set_iim(vcpu, val);
   8.456 +	case 25:
   8.457 +		return vcpu_set_iha(vcpu, val);
   8.458 +	case 64:
   8.459 +		return vcpu_set_lid(vcpu, val);
   8.460 +	case 65:
   8.461 +		return IA64_ILLOP_FAULT;
   8.462 +	case 66:
   8.463 +		return vcpu_set_tpr(vcpu, val);
   8.464 +	case 67:
   8.465 +		return vcpu_set_eoi(vcpu, val);
   8.466 +	case 68:
   8.467 +		return IA64_ILLOP_FAULT;
   8.468 +	case 69:
   8.469 +		return IA64_ILLOP_FAULT;
   8.470 +	case 70:
   8.471 +		return IA64_ILLOP_FAULT;
   8.472 +	case 71:
   8.473 +		return IA64_ILLOP_FAULT;
   8.474 +	case 72:
   8.475 +		return vcpu_set_itv(vcpu, val);
   8.476 +	case 73:
   8.477 +		return vcpu_set_pmv(vcpu, val);
   8.478 +	case 74:
   8.479 +		return vcpu_set_cmcv(vcpu, val);
   8.480 +	case 80:
   8.481 +		return vcpu_set_lrr0(vcpu, val);
   8.482 +	case 81:
   8.483 +		return vcpu_set_lrr1(vcpu, val);
   8.484 +	default:
   8.485 +		return IA64_ILLOP_FAULT;
   8.486  	}
   8.487  }
   8.488  
   8.489 -static IA64FAULT priv_rsm(VCPU *vcpu, INST64 inst)
   8.490 +static IA64FAULT priv_rsm(VCPU * vcpu, INST64 inst)
   8.491  {
   8.492 -	UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
   8.493 -	return vcpu_reset_psr_sm(vcpu,imm24);
   8.494 +	u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
   8.495 +	return vcpu_reset_psr_sm(vcpu, imm24);
   8.496  }
   8.497  
   8.498 -static IA64FAULT priv_ssm(VCPU *vcpu, INST64 inst)
   8.499 +static IA64FAULT priv_ssm(VCPU * vcpu, INST64 inst)
   8.500  {
   8.501 -	UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
   8.502 -	return vcpu_set_psr_sm(vcpu,imm24);
   8.503 +	u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
   8.504 +	return vcpu_set_psr_sm(vcpu, imm24);
   8.505  }
   8.506  
   8.507  /**
   8.508   * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
   8.509   */
   8.510 -static IA64FAULT priv_mov_to_psr(VCPU *vcpu, INST64 inst)
   8.511 +static IA64FAULT priv_mov_to_psr(VCPU * vcpu, INST64 inst)
   8.512  {
   8.513 -	UINT64 val = vcpu_get_gr(vcpu, inst.M35.r2);
   8.514 -	return vcpu_set_psr_l(vcpu,val);
   8.515 +	u64 val = vcpu_get_gr(vcpu, inst.M35.r2);
   8.516 +	return vcpu_set_psr_l(vcpu, val);
   8.517  }
   8.518  
   8.519  /**********************************
   8.520   * Moves from privileged registers
   8.521   **********************************/
   8.522  
   8.523 -static IA64FAULT priv_mov_from_rr(VCPU *vcpu, INST64 inst)
   8.524 +static IA64FAULT priv_mov_from_rr(VCPU * vcpu, INST64 inst)
   8.525  {
   8.526 -	UINT64 val;
   8.527 +	u64 val;
   8.528  	IA64FAULT fault;
   8.529 -	UINT64 reg;
   8.530 -	
   8.531 -	reg = vcpu_get_gr(vcpu,inst.M43.r3);
   8.532 +	u64 reg;
   8.533 +
   8.534 +	reg = vcpu_get_gr(vcpu, inst.M43.r3);
   8.535  	if (privify_en && inst.M43.r1 > 63) {
   8.536  		// privified mov from cpuid
   8.537 -		fault = vcpu_get_cpuid(vcpu,reg,&val);
   8.538 +		fault = vcpu_get_cpuid(vcpu, reg, &val);
   8.539  		if (fault == IA64_NO_FAULT)
   8.540 -			return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
   8.541 -	}
   8.542 -	else {
   8.543 -		fault = vcpu_get_rr(vcpu,reg,&val);
   8.544 +			return vcpu_set_gr(vcpu, inst.M43.r1 - 64, val, 0);
   8.545 +	} else {
   8.546 +		fault = vcpu_get_rr(vcpu, reg, &val);
   8.547  		if (fault == IA64_NO_FAULT)
   8.548  			return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
   8.549  	}
   8.550  	return fault;
   8.551  }
   8.552  
   8.553 -static IA64FAULT priv_mov_from_pkr(VCPU *vcpu, INST64 inst)
   8.554 +static IA64FAULT priv_mov_from_pkr(VCPU * vcpu, INST64 inst)
   8.555  {
   8.556 -	UINT64 val;
   8.557 +	u64 val;
   8.558  	IA64FAULT fault;
   8.559 -	
   8.560 -	fault = vcpu_get_pkr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   8.561 +
   8.562 +	fault = vcpu_get_pkr(vcpu, vcpu_get_gr(vcpu, inst.M43.r3), &val);
   8.563  	if (fault == IA64_NO_FAULT)
   8.564  		return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
   8.565 -	else return fault;
   8.566 +	else
   8.567 +		return fault;
   8.568  }
   8.569  
   8.570 -static IA64FAULT priv_mov_from_dbr(VCPU *vcpu, INST64 inst)
   8.571 +static IA64FAULT priv_mov_from_dbr(VCPU * vcpu, INST64 inst)
   8.572  {
   8.573 -	UINT64 val;
   8.574 +	u64 val;
   8.575  	IA64FAULT fault;
   8.576 -	
   8.577 -	fault = vcpu_get_dbr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   8.578 +
   8.579 +	fault = vcpu_get_dbr(vcpu, vcpu_get_gr(vcpu, inst.M43.r3), &val);
   8.580  	if (fault == IA64_NO_FAULT)
   8.581  		return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
   8.582 -	else return fault;
   8.583 +	else
   8.584 +		return fault;
   8.585  }
   8.586  
   8.587 -static IA64FAULT priv_mov_from_ibr(VCPU *vcpu, INST64 inst)
   8.588 +static IA64FAULT priv_mov_from_ibr(VCPU * vcpu, INST64 inst)
   8.589  {
   8.590 -	UINT64 val;
   8.591 +	u64 val;
   8.592  	IA64FAULT fault;
   8.593 -	
   8.594 -	fault = vcpu_get_ibr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   8.595 +
   8.596 +	fault = vcpu_get_ibr(vcpu, vcpu_get_gr(vcpu, inst.M43.r3), &val);
   8.597  	if (fault == IA64_NO_FAULT)
   8.598  		return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
   8.599 -	else return fault;
   8.600 +	else
   8.601 +		return fault;
   8.602  }
   8.603  
   8.604 -static IA64FAULT priv_mov_from_pmc(VCPU *vcpu, INST64 inst)
   8.605 +static IA64FAULT priv_mov_from_pmc(VCPU * vcpu, INST64 inst)
   8.606  {
   8.607 -	UINT64 val;
   8.608 +	u64 val;
   8.609  	IA64FAULT fault;
   8.610 -	UINT64 reg;
   8.611 -	
   8.612 -	reg = vcpu_get_gr(vcpu,inst.M43.r3);
   8.613 +	u64 reg;
   8.614 +
   8.615 +	reg = vcpu_get_gr(vcpu, inst.M43.r3);
   8.616  	if (privify_en && inst.M43.r1 > 63) {
   8.617  		// privified mov from pmd
   8.618 -		fault = vcpu_get_pmd(vcpu,reg,&val);
   8.619 +		fault = vcpu_get_pmd(vcpu, reg, &val);
   8.620  		if (fault == IA64_NO_FAULT)
   8.621 -			return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
   8.622 -	}
   8.623 -	else {
   8.624 -		fault = vcpu_get_pmc(vcpu,reg,&val);
   8.625 +			return vcpu_set_gr(vcpu, inst.M43.r1 - 64, val, 0);
   8.626 +	} else {
   8.627 +		fault = vcpu_get_pmc(vcpu, reg, &val);
   8.628  		if (fault == IA64_NO_FAULT)
   8.629  			return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
   8.630  	}
   8.631 @@ -410,55 +443,83 @@ static IA64FAULT priv_mov_from_pmc(VCPU 
   8.632  #define cr_get(cr) \
   8.633  	((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
   8.634  		vcpu_set_gr(vcpu, tgt, val, 0) : fault;
   8.635 -	
   8.636 -static IA64FAULT priv_mov_from_cr(VCPU *vcpu, INST64 inst)
   8.637 +
   8.638 +static IA64FAULT priv_mov_from_cr(VCPU * vcpu, INST64 inst)
   8.639  {
   8.640 -	UINT64 tgt = inst.M33.r1;
   8.641 -	UINT64 val;
   8.642 +	u64 tgt = inst.M33.r1;
   8.643 +	u64 val;
   8.644  	IA64FAULT fault;
   8.645  
   8.646  	perfc_incra(mov_from_cr, inst.M33.cr3);
   8.647  	switch (inst.M33.cr3) {
   8.648 -	    case 0: return cr_get(dcr);
   8.649 -	    case 1: return cr_get(itm);
   8.650 -	    case 2: return cr_get(iva);
   8.651 -	    case 8: return cr_get(pta);
   8.652 -	    case 16:return cr_get(ipsr);
   8.653 -	    case 17:return cr_get(isr);
   8.654 -	    case 19:return cr_get(iip);
   8.655 -	    case 20:return cr_get(ifa);
   8.656 -	    case 21:return cr_get(itir);
   8.657 -	    case 22:return cr_get(iipa);
   8.658 -	    case 23:return cr_get(ifs);
   8.659 -	    case 24:return cr_get(iim);
   8.660 -	    case 25:return cr_get(iha);
   8.661 -	    case 64:return cr_get(lid);
   8.662 -	    case 65:return cr_get(ivr);
   8.663 -	    case 66:return cr_get(tpr);
   8.664 -	    case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
   8.665 -	    case 68:return cr_get(irr0);
   8.666 -	    case 69:return cr_get(irr1);
   8.667 -	    case 70:return cr_get(irr2);
   8.668 -	    case 71:return cr_get(irr3);
   8.669 -	    case 72:return cr_get(itv);
   8.670 -	    case 73:return cr_get(pmv);
   8.671 -	    case 74:return cr_get(cmcv);
   8.672 -	    case 80:return cr_get(lrr0);
   8.673 -	    case 81:return cr_get(lrr1);
   8.674 -	    default: return IA64_ILLOP_FAULT;
   8.675 +	case 0:
   8.676 +		return cr_get(dcr);
   8.677 +	case 1:
   8.678 +		return cr_get(itm);
   8.679 +	case 2:
   8.680 +		return cr_get(iva);
   8.681 +	case 8:
   8.682 +		return cr_get(pta);
   8.683 +	case 16:
   8.684 +		return cr_get(ipsr);
   8.685 +	case 17:
   8.686 +		return cr_get(isr);
   8.687 +	case 19:
   8.688 +		return cr_get(iip);
   8.689 +	case 20:
   8.690 +		return cr_get(ifa);
   8.691 +	case 21:
   8.692 +		return cr_get(itir);
   8.693 +	case 22:
   8.694 +		return cr_get(iipa);
   8.695 +	case 23:
   8.696 +		return cr_get(ifs);
   8.697 +	case 24:
   8.698 +		return cr_get(iim);
   8.699 +	case 25:
   8.700 +		return cr_get(iha);
   8.701 +	case 64:
   8.702 +		return cr_get(lid);
   8.703 +	case 65:
   8.704 +		return cr_get(ivr);
   8.705 +	case 66:
   8.706 +		return cr_get(tpr);
   8.707 +	case 67:
   8.708 +		return vcpu_set_gr(vcpu, tgt, 0L, 0);
   8.709 +	case 68:
   8.710 +		return cr_get(irr0);
   8.711 +	case 69:
   8.712 +		return cr_get(irr1);
   8.713 +	case 70:
   8.714 +		return cr_get(irr2);
   8.715 +	case 71:
   8.716 +		return cr_get(irr3);
   8.717 +	case 72:
   8.718 +		return cr_get(itv);
   8.719 +	case 73:
   8.720 +		return cr_get(pmv);
   8.721 +	case 74:
   8.722 +		return cr_get(cmcv);
   8.723 +	case 80:
   8.724 +		return cr_get(lrr0);
   8.725 +	case 81:
   8.726 +		return cr_get(lrr1);
   8.727 +	default:
   8.728 +		return IA64_ILLOP_FAULT;
   8.729  	}
   8.730  	return IA64_ILLOP_FAULT;
   8.731  }
   8.732  
   8.733 -static IA64FAULT priv_mov_from_psr(VCPU *vcpu, INST64 inst)
   8.734 +static IA64FAULT priv_mov_from_psr(VCPU * vcpu, INST64 inst)
   8.735  {
   8.736 -	UINT64 tgt = inst.M33.r1;
   8.737 -	UINT64 val;
   8.738 +	u64 tgt = inst.M33.r1;
   8.739 +	u64 val;
   8.740  	IA64FAULT fault;
   8.741  
   8.742 -	if ((fault = vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
   8.743 +	if ((fault = vcpu_get_psr(vcpu, &val)) == IA64_NO_FAULT)
   8.744  		return vcpu_set_gr(vcpu, tgt, val, 0);
   8.745 -	else return fault;
   8.746 +	else
   8.747 +		return fault;
   8.748  }
   8.749  
   8.750  /**************************************************************************
   8.751 @@ -483,28 +544,28 @@ static const IA64_SLOT_TYPE slot_types[0
   8.752  };
   8.753  
   8.754  // pointer to privileged emulation function
   8.755 -typedef IA64FAULT (*PPEFCN)(VCPU *vcpu, INST64 inst);
   8.756 +typedef IA64FAULT(*PPEFCN) (VCPU * vcpu, INST64 inst);
   8.757  
   8.758  static const PPEFCN Mpriv_funcs[64] = {
   8.759 -  priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
   8.760 -  priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
   8.761 -  0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
   8.762 -  priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
   8.763 -  priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr, priv_mov_from_pkr,
   8.764 -  priv_mov_from_pmc, 0, 0, 0,
   8.765 -  0, 0, 0, 0,
   8.766 -  0, 0, priv_tpa, priv_tak,
   8.767 -  0, 0, 0, 0,
   8.768 -  priv_mov_from_cr, priv_mov_from_psr, 0, 0,
   8.769 -  0, 0, 0, 0,
   8.770 -  priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
   8.771 -  0, 0, 0, 0,
   8.772 -  priv_ptc_e, 0, 0, 0,
   8.773 -  0, 0, 0, 0, 0, 0, 0, 0
   8.774 +	priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
   8.775 +	priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
   8.776 +	0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
   8.777 +	priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
   8.778 +	priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr,
   8.779 +	priv_mov_from_pkr,
   8.780 +	priv_mov_from_pmc, 0, 0, 0,
   8.781 +	0, 0, 0, 0,
   8.782 +	0, 0, priv_tpa, priv_tak,
   8.783 +	0, 0, 0, 0,
   8.784 +	priv_mov_from_cr, priv_mov_from_psr, 0, 0,
   8.785 +	0, 0, 0, 0,
   8.786 +	priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
   8.787 +	0, 0, 0, 0,
   8.788 +	priv_ptc_e, 0, 0, 0,
   8.789 +	0, 0, 0, 0, 0, 0, 0, 0
   8.790  };
   8.791  
   8.792 -static IA64FAULT
   8.793 -priv_handle_op(VCPU *vcpu, REGS *regs, int privlvl)
   8.794 +static IA64FAULT priv_handle_op(VCPU * vcpu, REGS * regs, int privlvl)
   8.795  {
   8.796  	IA64_BUNDLE bundle;
   8.797  	int slot;
   8.798 @@ -512,85 +573,97 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
   8.799  	INST64 inst;
   8.800  	PPEFCN pfunc;
   8.801  	unsigned long ipsr = regs->cr_ipsr;
   8.802 -	UINT64 iip = regs->cr_iip;
   8.803 +	u64 iip = regs->cr_iip;
   8.804  	int x6;
   8.805 -	
   8.806 +
   8.807  	// make a local copy of the bundle containing the privop
   8.808  	if (!vcpu_get_domain_bundle(vcpu, regs, iip, &bundle)) {
   8.809  		//return vcpu_force_data_miss(vcpu, regs->cr_iip);
   8.810  		return vcpu_force_inst_miss(vcpu, regs->cr_iip);
   8.811  	}
   8.812 -
   8.813  #if 0
   8.814 -	if (iip==0xa000000100001820) {
   8.815 +	if (iip == 0xa000000100001820) {
   8.816  		static int firstpagefault = 1;
   8.817  		if (firstpagefault) {
   8.818 -			printf("*** First time to domain page fault!\n");				firstpagefault=0;
   8.819 +			printf("*** First time to domain page fault!\n");
   8.820 +			firstpagefault = 0;
   8.821  		}
   8.822  	}
   8.823  #endif
   8.824  	if (privop_trace) {
   8.825  		static long i = 400;
   8.826  		//if (i > 0) printf("priv_handle_op: at 0x%lx\n",iip);
   8.827 -		if (i > 0) printf("priv_handle_op: privop trace at 0x%lx, itc=%lx, itm=%lx\n",
   8.828 -			iip,ia64_get_itc(),ia64_get_itm());
   8.829 +		if (i > 0)
   8.830 +			printf("priv_handle_op: privop trace at 0x%lx, "
   8.831 +			       "itc=%lx, itm=%lx\n",
   8.832 +			       iip, ia64_get_itc(), ia64_get_itm());
   8.833  		i--;
   8.834  	}
   8.835  	slot = ((struct ia64_psr *)&ipsr)->ri;
   8.836 -	if (!slot) inst.inst = (bundle.i64[0]>>5) & MASK_41;
   8.837 +	if (!slot)
   8.838 +		inst.inst = (bundle.i64[0] >> 5) & MASK_41;
   8.839  	else if (slot == 1)
   8.840 -		inst.inst = ((bundle.i64[0]>>46) | bundle.i64[1]<<18) & MASK_41;
   8.841 -	else if (slot == 2) inst.inst = (bundle.i64[1]>>23) & MASK_41; 
   8.842 -	else printf("priv_handle_op: illegal slot: %d\n", slot);
   8.843 +		inst.inst =
   8.844 +		    ((bundle.i64[0] >> 46) | bundle.i64[1] << 18) & MASK_41;
   8.845 +	else if (slot == 2)
   8.846 +		inst.inst = (bundle.i64[1] >> 23) & MASK_41;
   8.847 +	else
   8.848 +		printf("priv_handle_op: illegal slot: %d\n", slot);
   8.849  
   8.850  	slot_type = slot_types[bundle.template][slot];
   8.851  	if (priv_verbose) {
   8.852 -		printf("priv_handle_op: checking bundle at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
   8.853 -		 iip, (UINT64)inst.inst, slot, slot_type);
   8.854 +		printf("priv_handle_op: checking bundle at 0x%lx "
   8.855 +		       "(op=0x%016lx) slot %d (type=%d)\n",
   8.856 +		       iip, (u64) inst.inst, slot, slot_type);
   8.857  	}
   8.858  	if (slot_type == B && inst.generic.major == 0 && inst.B8.x6 == 0x0) {
   8.859  		// break instr for privified cover
   8.860 -	}
   8.861 -	else if (privlvl != 2) return (IA64_ILLOP_FAULT);
   8.862 +	} else if (privlvl != 2)
   8.863 +		return IA64_ILLOP_FAULT;
   8.864  	switch (slot_type) {
   8.865 -	    case M:
   8.866 +	case M:
   8.867  		if (inst.generic.major == 0) {
   8.868  #if 0
   8.869  			if (inst.M29.x6 == 0 && inst.M29.x3 == 0) {
   8.870  				privcnt.cover++;
   8.871 -				return priv_cover(vcpu,inst);
   8.872 +				return priv_cover(vcpu, inst);
   8.873  			}
   8.874  #endif
   8.875 -			if (inst.M29.x3 != 0) break;
   8.876 +			if (inst.M29.x3 != 0)
   8.877 +				break;
   8.878  			if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
   8.879  				perfc_incrc(mov_to_ar_imm);
   8.880 -				return priv_mov_to_ar_imm(vcpu,inst);
   8.881 +				return priv_mov_to_ar_imm(vcpu, inst);
   8.882  			}
   8.883  			if (inst.M44.x4 == 6) {
   8.884  				perfc_incrc(ssm);
   8.885 -				return priv_ssm(vcpu,inst);
   8.886 +				return priv_ssm(vcpu, inst);
   8.887  			}
   8.888  			if (inst.M44.x4 == 7) {
   8.889  				perfc_incrc(rsm);
   8.890 -				return priv_rsm(vcpu,inst);
   8.891 +				return priv_rsm(vcpu, inst);
   8.892  			}
   8.893  			break;
   8.894 -		}
   8.895 -		else if (inst.generic.major != 1) break;
   8.896 +		} else if (inst.generic.major != 1)
   8.897 +			break;
   8.898  		x6 = inst.M29.x6;
   8.899  		if (x6 == 0x2a) {
   8.900  			if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8)
   8.901  				perfc_incrc(mov_from_ar); // privified mov from kr
   8.902  			else
   8.903  				perfc_incrc(mov_to_ar_reg);
   8.904 -			return priv_mov_to_ar_reg(vcpu,inst);
   8.905 +			return priv_mov_to_ar_reg(vcpu, inst);
   8.906  		}
   8.907 -		if (inst.M29.x3 != 0) break;
   8.908 -		if (!(pfunc = Mpriv_funcs[x6])) break;
   8.909 -		if (x6 == 0x1e || x6 == 0x1f)  { // tpa or tak are "special"
   8.910 +		if (inst.M29.x3 != 0)
   8.911 +			break;
   8.912 +		if (!(pfunc = Mpriv_funcs[x6]))
   8.913 +			break;
   8.914 +		if (x6 == 0x1e || x6 == 0x1f) {	// tpa or tak are "special"
   8.915  			if (privify_en && inst.M46.r3 > 63) {
   8.916 -				if (x6 == 0x1e) x6 = 0x1b;
   8.917 -				else x6 = 0x1a;
   8.918 +				if (x6 == 0x1e)
   8.919 +					x6 = 0x1b;
   8.920 +				else
   8.921 +					x6 = 0x1a;
   8.922  			}
   8.923  		}
   8.924  		if (privify_en && x6 == 52 && inst.M28.r3 > 63)
   8.925 @@ -599,61 +672,66 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
   8.926  			perfc_incrc(cpuid);
   8.927  		else
   8.928  			perfc_incra(misc_privop, x6);
   8.929 -		return (*pfunc)(vcpu,inst);
   8.930 +		return (*pfunc) (vcpu, inst);
   8.931  		break;
   8.932 -	    case B:
   8.933 -		if (inst.generic.major != 0) break;
   8.934 +	case B:
   8.935 +		if (inst.generic.major != 0)
   8.936 +			break;
   8.937  		if (inst.B8.x6 == 0x08) {
   8.938  			IA64FAULT fault;
   8.939  			perfc_incrc(rfi);
   8.940 -			fault = priv_rfi(vcpu,inst);
   8.941 -			if (fault == IA64_NO_FAULT) fault = IA64_RFI_IN_PROGRESS;
   8.942 +			fault = priv_rfi(vcpu, inst);
   8.943 +			if (fault == IA64_NO_FAULT)
   8.944 +				fault = IA64_RFI_IN_PROGRESS;
   8.945  			return fault;
   8.946  		}
   8.947  		if (inst.B8.x6 == 0x0c) {
   8.948  			perfc_incrc(bsw0);
   8.949 -			return priv_bsw0(vcpu,inst);
   8.950 +			return priv_bsw0(vcpu, inst);
   8.951  		}
   8.952  		if (inst.B8.x6 == 0x0d) {
   8.953  			perfc_incrc(bsw1);
   8.954 -			return priv_bsw1(vcpu,inst);
   8.955 +			return priv_bsw1(vcpu, inst);
   8.956  		}
   8.957  		if (inst.B8.x6 == 0x0) {
   8.958  			// break instr for privified cover
   8.959  			perfc_incrc(cover);
   8.960 -			return priv_cover(vcpu,inst);
   8.961 +			return priv_cover(vcpu, inst);
   8.962  		}
   8.963  		break;
   8.964 -	    case I:
   8.965 -		if (inst.generic.major != 0) break;
   8.966 +	case I:
   8.967 +		if (inst.generic.major != 0)
   8.968 +			break;
   8.969  #if 0
   8.970  		if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
   8.971  			perfc_incrc(cover);
   8.972 -			return priv_cover(vcpu,inst);
   8.973 +			return priv_cover(vcpu, inst);
   8.974  		}
   8.975  #endif
   8.976 -		if (inst.I26.x3 != 0) break;  // I26.x3 == I27.x3
   8.977 +		if (inst.I26.x3 != 0)
   8.978 +			break;	// I26.x3 == I27.x3
   8.979  		if (inst.I26.x6 == 0x2a) {
   8.980  			if (privify_en && inst.I26.r2 > 63 && inst.I26.ar3 < 8)
   8.981 -				perfc_incrc(mov_from_ar); // privified mov from kr
   8.982 -			else 
   8.983 +				perfc_incrc(mov_from_ar);	// privified mov from kr
   8.984 +			else
   8.985  				perfc_incrc(mov_to_ar_reg);
   8.986 -			return priv_mov_to_ar_reg(vcpu,inst);
   8.987 +			return priv_mov_to_ar_reg(vcpu, inst);
   8.988  		}
   8.989  		if (inst.I27.x6 == 0x0a) {
   8.990  			perfc_incrc(mov_to_ar_imm);
   8.991 -			return priv_mov_to_ar_imm(vcpu,inst);
   8.992 +			return priv_mov_to_ar_imm(vcpu, inst);
   8.993  		}
   8.994  		break;
   8.995 -	    default:
   8.996 +	default:
   8.997  		break;
   8.998  	}
   8.999 -        //printf("We who are about do die salute you\n");
  8.1000 -	printf("priv_handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d), ipsr=0x%lx\n",
  8.1001 -		 iip, (UINT64)inst.inst, slot, slot_type, ipsr);
  8.1002 -        //printf("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
  8.1003 -        //thread_mozambique("privop fault\n");
  8.1004 -	return (IA64_ILLOP_FAULT);
   8.1005 +	//printf("We who are about to die salute you\n");
  8.1006 +	printf("priv_handle_op: can't handle privop at 0x%lx (op=0x%016lx) "
  8.1007 +	       "slot %d (type=%d), ipsr=0x%lx\n",
  8.1008 +	       iip, (u64) inst.inst, slot, slot_type, ipsr);
  8.1009 +	//printf("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
  8.1010 +	//thread_mozambique("privop fault\n");
  8.1011 +	return IA64_ILLOP_FAULT;
  8.1012  }
  8.1013  
  8.1014  /** Emulate a privileged operation.
  8.1015 @@ -666,142 +744,139 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
  8.1016   * @param isrcode interrupt service routine code
  8.1017   * @return fault
  8.1018   */
  8.1019 -IA64FAULT
  8.1020 -priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr)
  8.1021 +IA64FAULT priv_emulate(VCPU * vcpu, REGS * regs, u64 isr)
  8.1022  {
  8.1023  	IA64FAULT fault;
  8.1024 -	UINT64 ipsr = regs->cr_ipsr;
  8.1025 -	UINT64 isrcode = (isr >> 4) & 0xf;
  8.1026 +	u64 ipsr = regs->cr_ipsr;
  8.1027 +	u64 isrcode = (isr >> 4) & 0xf;
  8.1028  	int privlvl;
  8.1029  
  8.1030  	// handle privops masked as illops? and breaks (6)
  8.1031  	if (isrcode != 1 && isrcode != 2 && isrcode != 0 && isrcode != 6) {
  8.1032 -        	printf("priv_emulate: isrcode != 0 or 1 or 2\n");
  8.1033 +		printf("priv_emulate: isrcode != 0 or 1 or 2\n");
  8.1034  		printf("priv_emulate: returning ILLOP, not implemented!\n");
  8.1035 -		while (1);
  8.1036 +		while (1) ;
  8.1037  		return IA64_ILLOP_FAULT;
  8.1038  	}
  8.1039  	//if (isrcode != 1 && isrcode != 2) return 0;
  8.1040  	privlvl = ia64_get_cpl(ipsr);
  8.1041  	// its OK for a privified-cover to be executed in user-land
  8.1042 -	fault = priv_handle_op(vcpu,regs,privlvl);
  8.1043 -	if ((fault == IA64_NO_FAULT) || (fault == IA64_EXTINT_VECTOR)) { // success!!
  8.1044 +	fault = priv_handle_op(vcpu, regs, privlvl);
  8.1045 +	if ((fault == IA64_NO_FAULT) || (fault == IA64_EXTINT_VECTOR)) {
  8.1046 +		// success!!
  8.1047  		// update iip/ipsr to point to the next instruction
  8.1048  		(void)vcpu_increment_iip(vcpu);
  8.1049  	}
  8.1050  	if (fault == IA64_ILLOP_FAULT)
  8.1051  		printf("priv_emulate: priv_handle_op fails, "
  8.1052 -		       "isr=0x%lx iip=%lx\n",isr, regs->cr_iip);
  8.1053 +		       "isr=0x%lx iip=%lx\n", isr, regs->cr_iip);
  8.1054  	return fault;
  8.1055  }
  8.1056  
  8.1057  /* hyperprivops are generally executed in assembly (with physical psr.ic off)
  8.1058   * so this code is primarily used for debugging them */
  8.1059 -int
  8.1060 -ia64_hyperprivop(unsigned long iim, REGS *regs)
  8.1061 +int ia64_hyperprivop(unsigned long iim, REGS * regs)
  8.1062  {
  8.1063  	struct vcpu *v = current;
  8.1064 -	UINT64 val;
  8.1065 -	UINT64 itir, ifa;
  8.1066 +	u64 val;
  8.1067 +	u64 itir, ifa;
  8.1068  
  8.1069  	if (!iim || iim > HYPERPRIVOP_MAX) {
  8.1070  		panic_domain(regs, "bad hyperprivop: iim=%lx, iip=0x%lx\n",
  8.1071 -		             iim, regs->cr_iip);
  8.1072 +			     iim, regs->cr_iip);
  8.1073  		return 1;
  8.1074  	}
  8.1075  	perfc_incra(slow_hyperprivop, iim);
  8.1076 -	switch(iim) {
  8.1077 -	    case HYPERPRIVOP_RFI:
  8.1078 -		(void)vcpu_rfi(v);
  8.1079 +	switch (iim) {
  8.1080 +	case HYPERPRIVOP_RFI:
  8.1081 +		vcpu_rfi(v);
  8.1082  		return 0;	// don't update iip
  8.1083 -	    case HYPERPRIVOP_RSM_DT:
  8.1084 -		(void)vcpu_reset_psr_dt(v);
  8.1085 +	case HYPERPRIVOP_RSM_DT:
  8.1086 +		vcpu_reset_psr_dt(v);
  8.1087  		return 1;
  8.1088 -	    case HYPERPRIVOP_SSM_DT:
  8.1089 -		(void)vcpu_set_psr_dt(v);
  8.1090 +	case HYPERPRIVOP_SSM_DT:
  8.1091 +		vcpu_set_psr_dt(v);
  8.1092  		return 1;
  8.1093 -	    case HYPERPRIVOP_COVER:
  8.1094 -		(void)vcpu_cover(v);
  8.1095 +	case HYPERPRIVOP_COVER:
  8.1096 +		vcpu_cover(v);
  8.1097  		return 1;
  8.1098 -	    case HYPERPRIVOP_ITC_D:
  8.1099 -		(void)vcpu_get_itir(v,&itir);
  8.1100 -		(void)vcpu_get_ifa(v,&ifa);
  8.1101 -		(void)vcpu_itc_d(v,regs->r8,itir,ifa);
  8.1102 +	case HYPERPRIVOP_ITC_D:
  8.1103 +		vcpu_get_itir(v, &itir);
  8.1104 +		vcpu_get_ifa(v, &ifa);
  8.1105 +		vcpu_itc_d(v, regs->r8, itir, ifa);
  8.1106  		return 1;
  8.1107 -	    case HYPERPRIVOP_ITC_I:
  8.1108 -		(void)vcpu_get_itir(v,&itir);
  8.1109 -		(void)vcpu_get_ifa(v,&ifa);
  8.1110 -		(void)vcpu_itc_i(v,regs->r8,itir,ifa);
  8.1111 +	case HYPERPRIVOP_ITC_I:
  8.1112 +		vcpu_get_itir(v, &itir);
  8.1113 +		vcpu_get_ifa(v, &ifa);
  8.1114 +		vcpu_itc_i(v, regs->r8, itir, ifa);
  8.1115  		return 1;
  8.1116 -	    case HYPERPRIVOP_SSM_I:
  8.1117 -		(void)vcpu_set_psr_i(v);
  8.1118 +	case HYPERPRIVOP_SSM_I:
  8.1119 +		vcpu_set_psr_i(v);
  8.1120  		return 1;
  8.1121 -	    case HYPERPRIVOP_GET_IVR:
  8.1122 -		(void)vcpu_get_ivr(v,&val);
  8.1123 +	case HYPERPRIVOP_GET_IVR:
  8.1124 +		vcpu_get_ivr(v, &val);
  8.1125  		regs->r8 = val;
  8.1126  		return 1;
  8.1127 -	    case HYPERPRIVOP_GET_TPR:
  8.1128 -		(void)vcpu_get_tpr(v,&val);
  8.1129 +	case HYPERPRIVOP_GET_TPR:
  8.1130 +		vcpu_get_tpr(v, &val);
  8.1131  		regs->r8 = val;
  8.1132  		return 1;
  8.1133 -	    case HYPERPRIVOP_SET_TPR:
  8.1134 -		(void)vcpu_set_tpr(v,regs->r8);
  8.1135 +	case HYPERPRIVOP_SET_TPR:
  8.1136 +		vcpu_set_tpr(v, regs->r8);
  8.1137  		return 1;
  8.1138 -	    case HYPERPRIVOP_EOI:
  8.1139 -		(void)vcpu_set_eoi(v,0L);
  8.1140 +	case HYPERPRIVOP_EOI:
  8.1141 +		vcpu_set_eoi(v, 0L);
  8.1142  		return 1;
  8.1143 -	    case HYPERPRIVOP_SET_ITM:
  8.1144 -		(void)vcpu_set_itm(v,regs->r8);
  8.1145 +	case HYPERPRIVOP_SET_ITM:
  8.1146 +		vcpu_set_itm(v, regs->r8);
  8.1147  		return 1;
  8.1148 -	    case HYPERPRIVOP_THASH:
  8.1149 -		(void)vcpu_thash(v,regs->r8,&val);
  8.1150 +	case HYPERPRIVOP_THASH:
  8.1151 +		vcpu_thash(v, regs->r8, &val);
  8.1152  		regs->r8 = val;
  8.1153  		return 1;
  8.1154 -	    case HYPERPRIVOP_PTC_GA:
  8.1155 -		(void)vcpu_ptc_ga(v,regs->r8,(1L << ((regs->r9 & 0xfc) >> 2)));
  8.1156 +	case HYPERPRIVOP_PTC_GA:
  8.1157 +		vcpu_ptc_ga(v, regs->r8, (1L << ((regs->r9 & 0xfc) >> 2)));
  8.1158  		return 1;
  8.1159 -	    case HYPERPRIVOP_ITR_D:
  8.1160 -		(void)vcpu_get_itir(v,&itir);
  8.1161 -		(void)vcpu_get_ifa(v,&ifa);
  8.1162 -		(void)vcpu_itr_d(v,regs->r8,regs->r9,itir,ifa);
  8.1163 +	case HYPERPRIVOP_ITR_D:
  8.1164 +		vcpu_get_itir(v, &itir);
  8.1165 +		vcpu_get_ifa(v, &ifa);
  8.1166 +		vcpu_itr_d(v, regs->r8, regs->r9, itir, ifa);
  8.1167  		return 1;
  8.1168 -	    case HYPERPRIVOP_GET_RR:
  8.1169 -		(void)vcpu_get_rr(v,regs->r8,&val);
  8.1170 +	case HYPERPRIVOP_GET_RR:
  8.1171 +		vcpu_get_rr(v, regs->r8, &val);
  8.1172  		regs->r8 = val;
  8.1173  		return 1;
  8.1174 -	    case HYPERPRIVOP_SET_RR:
  8.1175 -		(void)vcpu_set_rr(v,regs->r8,regs->r9);
  8.1176 +	case HYPERPRIVOP_SET_RR:
  8.1177 +		vcpu_set_rr(v, regs->r8, regs->r9);
  8.1178  		return 1;
  8.1179 -	    case HYPERPRIVOP_SET_KR:
  8.1180 -		(void)vcpu_set_ar(v,regs->r8,regs->r9);
  8.1181 +	case HYPERPRIVOP_SET_KR:
  8.1182 +		vcpu_set_ar(v, regs->r8, regs->r9);
  8.1183  		return 1;
  8.1184 -	    case HYPERPRIVOP_FC:
  8.1185 -		(void)vcpu_fc(v,regs->r8);
  8.1186 +	case HYPERPRIVOP_FC:
  8.1187 +		vcpu_fc(v, regs->r8);
  8.1188  		return 1;
  8.1189 -	    case HYPERPRIVOP_GET_CPUID:
  8.1190 -		(void)vcpu_get_cpuid(v,regs->r8,&val);
  8.1191 +	case HYPERPRIVOP_GET_CPUID:
  8.1192 +		vcpu_get_cpuid(v, regs->r8, &val);
  8.1193  		regs->r8 = val;
  8.1194  		return 1;
  8.1195 -	    case HYPERPRIVOP_GET_PMD:
  8.1196 -		(void)vcpu_get_pmd(v,regs->r8,&val);
  8.1197 +	case HYPERPRIVOP_GET_PMD:
  8.1198 +		vcpu_get_pmd(v, regs->r8, &val);
  8.1199  		regs->r8 = val;
  8.1200  		return 1;
  8.1201 -	    case HYPERPRIVOP_GET_EFLAG:
  8.1202 -		(void)vcpu_get_ar(v,24,&val);
  8.1203 +	case HYPERPRIVOP_GET_EFLAG:
  8.1204 +		vcpu_get_ar(v, 24, &val);
  8.1205  		regs->r8 = val;
  8.1206  		return 1;
  8.1207 -	    case HYPERPRIVOP_SET_EFLAG:
  8.1208 -		(void)vcpu_set_ar(v,24,regs->r8);
  8.1209 +	case HYPERPRIVOP_SET_EFLAG:
  8.1210 +		vcpu_set_ar(v, 24, regs->r8);
  8.1211  		return 1;
  8.1212 -	    case HYPERPRIVOP_RSM_BE:
  8.1213 -		(void)vcpu_reset_psr_sm(v, IA64_PSR_BE);
  8.1214 +	case HYPERPRIVOP_RSM_BE:
  8.1215 +		vcpu_reset_psr_sm(v, IA64_PSR_BE);
  8.1216  		return 1;
  8.1217 -	    case HYPERPRIVOP_GET_PSR:
  8.1218 -		(void)vcpu_get_psr(v, &val);
  8.1219 +	case HYPERPRIVOP_GET_PSR:
  8.1220 +		vcpu_get_psr(v, &val);
  8.1221  		regs->r8 = val;
  8.1222  		return 1;
  8.1223  	}
  8.1224  	return 0;
  8.1225  }
  8.1226 -
  8.1227 -
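
priv_handle_op above resolves M-unit privops by indexing Mpriv_funcs[] with the instruction's x6 field and calling the handler through (*pfunc)(vcpu, inst); a NULL slot means the encoding is not a privileged op at all. Below is a minimal, standalone sketch of that table-dispatch pattern. The toy_* names and the two-field "instruction" are illustrative stand-ins only, not the real IA-64 instruction formats or Xen types.

#include <stdio.h>

typedef int fault_t;			/* stand-in for IA64FAULT */
#define TOY_NO_FAULT	0
#define TOY_ILLOP	1

struct toy_inst {			/* hypothetical fields, not a real format */
	unsigned int x6;
	unsigned int r3;
};

static fault_t toy_tpa(struct toy_inst inst)
{
	printf("tpa r%u\n", inst.r3);
	return TOY_NO_FAULT;
}

static fault_t toy_tak(struct toy_inst inst)
{
	printf("tak r%u\n", inst.r3);
	return TOY_NO_FAULT;
}

/* sparse table indexed by x6; a NULL slot means "not a privop" */
static fault_t (*toy_funcs[64])(struct toy_inst) = {
	[0x1e] = toy_tpa,
	[0x1f] = toy_tak,
};

static fault_t toy_dispatch(struct toy_inst inst)
{
	fault_t (*pfunc)(struct toy_inst);

	if (inst.x6 >= 64 || !(pfunc = toy_funcs[inst.x6]))
		return TOY_ILLOP;	/* like the "break" paths above */
	return (*pfunc)(inst);
}

int main(void)
{
	struct toy_inst tpa = { .x6 = 0x1e, .r3 = 8 };
	struct toy_inst bad = { .x6 = 0x03, .r3 = 0 };

	printf("tpa -> %d\n", toy_dispatch(tpa));
	printf("bad -> %d\n", toy_dispatch(bad));
	return 0;
}
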
     9.1 --- a/xen/arch/ia64/xen/vcpu.c	Tue Oct 17 14:30:36 2006 -0600
     9.2 +++ b/xen/arch/ia64/xen/vcpu.c	Tue Oct 17 15:43:41 2006 -0600
     9.3 @@ -28,27 +28,31 @@
     9.4  #include <asm/tlb_track.h>
     9.5  
     9.6  /* FIXME: where these declarations should be there ? */
     9.7 -extern void getreg(unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs);
     9.8 -extern void setreg(unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs);
     9.9 -extern void getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs);
    9.10 +extern void getreg(unsigned long regnum, unsigned long *val, int *nat,
    9.11 +                   struct pt_regs *regs);
    9.12 +extern void setreg(unsigned long regnum, unsigned long val, int nat,
    9.13 +                   struct pt_regs *regs);
    9.14 +extern void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
    9.15 +                     struct pt_regs *regs);
    9.16  
    9.17 -extern void setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs);
    9.18 +extern void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
    9.19 +                     struct pt_regs *regs);
    9.20  
    9.21 -typedef	union {
    9.22 +typedef union {
    9.23  	struct ia64_psr ia64_psr;
    9.24  	unsigned long i64;
    9.25  } PSR;
    9.26  
    9.27  // this def for vcpu_regs won't work if kernel stack is present
    9.28 -//#define	vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
    9.29 +//#define       vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
    9.30  
    9.31 -#define	TRUE	1
    9.32 -#define	FALSE	0
    9.33 +#define	TRUE			1
    9.34 +#define	FALSE			0
    9.35  #define	IA64_PTA_SZ_BIT		2
    9.36  #define	IA64_PTA_VF_BIT		8
    9.37  #define	IA64_PTA_BASE_BIT	15
    9.38  #define	IA64_PTA_LFMT		(1UL << IA64_PTA_VF_BIT)
    9.39 -#define	IA64_PTA_SZ(x)	(x##UL << IA64_PTA_SZ_BIT)
    9.40 +#define	IA64_PTA_SZ(x)		(x##UL << IA64_PTA_SZ_BIT)
    9.41  
    9.42  unsigned long vcpu_verbose = 0;
    9.43  
    9.44 @@ -56,23 +60,23 @@ unsigned long vcpu_verbose = 0;
    9.45   VCPU general register access routines
    9.46  **************************************************************************/
    9.47  #ifdef XEN
    9.48 -UINT64
    9.49 -vcpu_get_gr(VCPU *vcpu, unsigned long reg)
    9.50 +u64 vcpu_get_gr(VCPU * vcpu, unsigned long reg)
    9.51  {
    9.52  	REGS *regs = vcpu_regs(vcpu);
    9.53 -	UINT64 val;
    9.54 +	u64 val;
    9.55  
    9.56 -	if (!reg) return 0;
    9.57 -	getreg(reg,&val,0,regs);	// FIXME: handle NATs later
    9.58 +	if (!reg)
    9.59 +		return 0;
    9.60 +	getreg(reg, &val, 0, regs);	// FIXME: handle NATs later
    9.61  	return val;
    9.62  }
    9.63 -IA64FAULT
    9.64 -vcpu_get_gr_nat(VCPU *vcpu, unsigned long reg, UINT64 *val)
    9.65 +
    9.66 +IA64FAULT vcpu_get_gr_nat(VCPU * vcpu, unsigned long reg, u64 * val)
    9.67  {
    9.68  	REGS *regs = vcpu_regs(vcpu);
    9.69  	int nat;
    9.70  
    9.71 -	getreg(reg,val,&nat,regs);	// FIXME: handle NATs later
    9.72 +	getreg(reg, val, &nat, regs);	// FIXME: handle NATs later
    9.73  	if (nat)
    9.74  		return IA64_NAT_CONSUMPTION_VECTOR;
    9.75  	return 0;
    9.76 @@ -81,32 +85,33 @@ vcpu_get_gr_nat(VCPU *vcpu, unsigned lon
    9.77  // returns:
    9.78  //   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
    9.79  //   IA64_NO_FAULT otherwise
    9.80 -IA64FAULT
    9.81 -vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value, int nat)
    9.82 +IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value, int nat)
    9.83  {
    9.84  	REGS *regs = vcpu_regs(vcpu);
    9.85  	long sof = (regs->cr_ifs) & 0x7f;
    9.86  
    9.87 -	if (!reg) return IA64_ILLOP_FAULT;
    9.88 -	if (reg >= sof + 32) return IA64_ILLOP_FAULT;
    9.89 -	setreg(reg,value,nat,regs);	// FIXME: handle NATs later
    9.90 +	if (!reg)
    9.91 +		return IA64_ILLOP_FAULT;
    9.92 +	if (reg >= sof + 32)
    9.93 +		return IA64_ILLOP_FAULT;
    9.94 +	setreg(reg, value, nat, regs);	// FIXME: handle NATs later
    9.95  	return IA64_NO_FAULT;
    9.96  }
    9.97  
    9.98  IA64FAULT
    9.99 -vcpu_get_fpreg(VCPU *vcpu, unsigned long reg, struct ia64_fpreg *val)
   9.100 +vcpu_get_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
   9.101  {
   9.102  	REGS *regs = vcpu_regs(vcpu);
   9.103 -	getfpreg(reg,val,regs);	// FIXME: handle NATs later
   9.104 +	getfpreg(reg, val, regs);	// FIXME: handle NATs later
   9.105  	return IA64_NO_FAULT;
   9.106  }
   9.107  
   9.108  IA64FAULT
   9.109 -vcpu_set_fpreg(VCPU *vcpu, unsigned long reg, struct ia64_fpreg *val)
   9.110 +vcpu_set_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
   9.111  {
   9.112  	REGS *regs = vcpu_regs(vcpu);
   9.113 -	if(reg > 1)
   9.114 -		setfpreg(reg,val,regs);	// FIXME: handle NATs later
   9.115 +	if (reg > 1)
   9.116 +		setfpreg(reg, val, regs);	// FIXME: handle NATs later
   9.117  	return IA64_NO_FAULT;
   9.118  }
   9.119  
   9.120 @@ -114,38 +119,39 @@ vcpu_set_fpreg(VCPU *vcpu, unsigned long
   9.121  // returns:
   9.122  //   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
   9.123  //   IA64_NO_FAULT otherwise
   9.124 -IA64FAULT
   9.125 -vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value)
   9.126 +IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value)
   9.127  {
   9.128  	REGS *regs = vcpu_regs(vcpu);
   9.129  	long sof = (regs->cr_ifs) & 0x7f;
   9.130  
   9.131 -	if (!reg) return IA64_ILLOP_FAULT;
   9.132 -	if (reg >= sof + 32) return IA64_ILLOP_FAULT;
   9.133 -	setreg(reg,value,0,regs);	// FIXME: handle NATs later
   9.134 +	if (!reg)
   9.135 +		return IA64_ILLOP_FAULT;
   9.136 +	if (reg >= sof + 32)
   9.137 +		return IA64_ILLOP_FAULT;
   9.138 +	setreg(reg, value, 0, regs);	// FIXME: handle NATs later
   9.139  	return IA64_NO_FAULT;
   9.140  }
   9.141  
   9.142  #endif
   9.143  
   9.144 -void vcpu_init_regs (struct vcpu *v)
   9.145 +void vcpu_init_regs(struct vcpu *v)
   9.146  {
   9.147  	struct pt_regs *regs;
   9.148  
   9.149 -	regs = vcpu_regs (v);
   9.150 +	regs = vcpu_regs(v);
   9.151  	if (VMX_DOMAIN(v)) {
   9.152  		/* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */
   9.153  		/* Need to be expanded as macro */
   9.154  		regs->cr_ipsr = 0x501008826008;
   9.155  	} else {
   9.156  		regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
   9.157 -		  | IA64_PSR_BITS_TO_SET | IA64_PSR_BN;
   9.158 +		    | IA64_PSR_BITS_TO_SET | IA64_PSR_BN;
   9.159  		regs->cr_ipsr &= ~(IA64_PSR_BITS_TO_CLEAR
   9.160  				   | IA64_PSR_RI | IA64_PSR_IS);
   9.161  		// domain runs at PL2
   9.162  		regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
   9.163  	}
   9.164 -	regs->cr_ifs = 1UL << 63; /* or clear? */
   9.165 +	regs->cr_ifs = 1UL << 63;	/* or clear? */
   9.166  	regs->ar_fpsr = FPSR_DEFAULT;
   9.167  
   9.168  	if (VMX_DOMAIN(v)) {
   9.169 @@ -155,13 +161,13 @@ void vcpu_init_regs (struct vcpu *v)
   9.170  		VCPU(v, dcr) = 0;
   9.171  	} else {
   9.172  		init_all_rr(v);
   9.173 -		regs->ar_rsc |= (2 << 2); /* force PL2/3 */
   9.174 +		regs->ar_rsc |= (2 << 2);	/* force PL2/3 */
   9.175  		VCPU(v, banknum) = 1;
   9.176  		VCPU(v, metaphysical_mode) = 1;
   9.177  		VCPU(v, interrupt_mask_addr) =
   9.178 -		             (unsigned char *)v->domain->arch.shared_info_va +
   9.179 -		             INT_ENABLE_OFFSET(v);
   9.180 -		VCPU(v, itv) = (1 << 16); /* timer vector masked */
   9.181 +		    (unsigned char *)v->domain->arch.shared_info_va +
   9.182 +		    INT_ENABLE_OFFSET(v);
   9.183 +		VCPU(v, itv) = (1 << 16);	/* timer vector masked */
   9.184  	}
   9.185  
   9.186  	v->arch.domain_itm_last = -1L;
   9.187 @@ -171,7 +177,7 @@ void vcpu_init_regs (struct vcpu *v)
   9.188   VCPU privileged application register access routines
   9.189  **************************************************************************/
   9.190  
   9.191 -void vcpu_load_kernel_regs(VCPU *vcpu)
   9.192 +void vcpu_load_kernel_regs(VCPU * vcpu)
   9.193  {
   9.194  	ia64_set_kr(0, VCPU(vcpu, krs[0]));
   9.195  	ia64_set_kr(1, VCPU(vcpu, krs[1]));
   9.196 @@ -186,26 +192,33 @@ void vcpu_load_kernel_regs(VCPU *vcpu)
   9.197  /* GCC 4.0.2 seems not to be able to suppress this call!.  */
   9.198  #define ia64_setreg_unknown_kr() return IA64_ILLOP_FAULT
   9.199  
   9.200 -IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
   9.201 +IA64FAULT vcpu_set_ar(VCPU * vcpu, u64 reg, u64 val)
   9.202  {
   9.203 -	if (reg == 44) return (vcpu_set_itc(vcpu,val));
   9.204 -	else if (reg == 27) return (IA64_ILLOP_FAULT);
   9.205 +	if (reg == 44)
   9.206 +		return vcpu_set_itc(vcpu, val);
   9.207 +	else if (reg == 27)
   9.208 +		return IA64_ILLOP_FAULT;
   9.209  	else if (reg == 24)
   9.210 -	    printf("warning: setting ar.eflg is a no-op; no IA-32 support\n");
   9.211 -	else if (reg > 7) return (IA64_ILLOP_FAULT);
   9.212 +		printf("warning: setting ar.eflg is a no-op; no IA-32 "
   9.213 +		       "support\n");
   9.214 +	else if (reg > 7)
   9.215 +		return IA64_ILLOP_FAULT;
   9.216  	else {
   9.217 -		PSCB(vcpu,krs[reg]) = val;
   9.218 -		ia64_set_kr(reg,val);
   9.219 +		PSCB(vcpu, krs[reg]) = val;
   9.220 +		ia64_set_kr(reg, val);
   9.221  	}
   9.222  	return IA64_NO_FAULT;
   9.223  }
   9.224  
   9.225 -IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
   9.226 +IA64FAULT vcpu_get_ar(VCPU * vcpu, u64 reg, u64 * val)
   9.227  {
   9.228  	if (reg == 24)
   9.229 -	    printf("warning: getting ar.eflg is a no-op; no IA-32 support\n");
   9.230 -	else if (reg > 7) return (IA64_ILLOP_FAULT);
   9.231 -	else *val = PSCB(vcpu,krs[reg]);
   9.232 +		printf("warning: getting ar.eflg is a no-op; no IA-32 "
   9.233 +		       "support\n");
   9.234 +	else if (reg > 7)
   9.235 +		return IA64_ILLOP_FAULT;
   9.236 +	else
   9.237 +		*val = PSCB(vcpu, krs[reg]);
   9.238  	return IA64_NO_FAULT;
   9.239  }
   9.240  
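
For kernel registers (reg 0..7), vcpu_set_ar above writes two places: the per-vcpu shadow PSCB(vcpu, krs[reg]) and the physical register via ia64_set_kr(), so that vcpu_load_kernel_regs() can replay a guest's kernel registers when it is scheduled back onto a CPU. The following self-contained sketch shows just that shadow-and-replay pattern; hw_kr[], toy_set_ar() and toy_load_kernel_regs() are made-up stand-ins, not Xen APIs.

#include <stdio.h>
#include <stdint.h>

#define NR_KRS 8

static uint64_t hw_kr[NR_KRS];		/* pretend machine kr0-kr7 */

struct toy_vcpu {
	uint64_t krs[NR_KRS];		/* software shadow, like PSCB(vcpu, krs[]) */
};

static int toy_set_ar(struct toy_vcpu *v, unsigned int reg, uint64_t val)
{
	if (reg >= NR_KRS)
		return -1;		/* only ar.k0-ar.k7 handled in this sketch */
	v->krs[reg] = val;		/* remember it for later replay */
	hw_kr[reg] = val;		/* like ia64_set_kr(reg, val) */
	return 0;
}

static void toy_load_kernel_regs(struct toy_vcpu *v)
{
	unsigned int r;

	/* replay the shadow when this vcpu runs, like vcpu_load_kernel_regs() */
	for (r = 0; r < NR_KRS; r++)
		hw_kr[r] = v->krs[r];
}

int main(void)
{
	struct toy_vcpu a = { { 0 } }, b = { { 0 } };

	toy_set_ar(&a, 3, 0xdeadbeef);
	toy_load_kernel_regs(&b);
	printf("kr3 while b runs: %#llx\n", (unsigned long long)hw_kr[3]);
	toy_load_kernel_regs(&a);
	printf("kr3 while a runs: %#llx\n", (unsigned long long)hw_kr[3]);
	return 0;
}
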
   9.241 @@ -213,24 +226,25 @@ IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64
   9.242   VCPU processor status register access routines
   9.243  **************************************************************************/
   9.244  
   9.245 -void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
   9.246 +void vcpu_set_metaphysical_mode(VCPU * vcpu, BOOLEAN newmode)
   9.247  {
   9.248  	/* only do something if mode changes */
   9.249 -	if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) {
   9.250 -		PSCB(vcpu,metaphysical_mode) = newmode;
   9.251 -		if (newmode) set_metaphysical_rr0();
   9.252 -		else if (PSCB(vcpu,rrs[0]) != -1)
   9.253 -			set_one_rr(0, PSCB(vcpu,rrs[0]));
   9.254 +	if (!!newmode ^ !!PSCB(vcpu, metaphysical_mode)) {
   9.255 +		PSCB(vcpu, metaphysical_mode) = newmode;
   9.256 +		if (newmode)
   9.257 +			set_metaphysical_rr0();
   9.258 +		else if (PSCB(vcpu, rrs[0]) != -1)
   9.259 +			set_one_rr(0, PSCB(vcpu, rrs[0]));
   9.260  	}
   9.261  }
   9.262  
   9.263 -IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu)
   9.264 +IA64FAULT vcpu_reset_psr_dt(VCPU * vcpu)
   9.265  {
   9.266 -	vcpu_set_metaphysical_mode(vcpu,TRUE);
   9.267 +	vcpu_set_metaphysical_mode(vcpu, TRUE);
   9.268  	return IA64_NO_FAULT;
   9.269  }
   9.270  
   9.271 -IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
   9.272 +IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu, u64 imm24)
   9.273  {
   9.274  	struct ia64_psr psr, imm, *ipsr;
   9.275  	REGS *regs = vcpu_regs(vcpu);
   9.276 @@ -238,72 +252,89 @@ IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, 
   9.277  	//PRIVOP_COUNT_ADDR(regs,_RSM);
   9.278  	// TODO: All of these bits need to be virtualized
   9.279  	// TODO: Only allowed for current vcpu
   9.280 -	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
   9.281 +	__asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
   9.282  	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
   9.283  	imm = *(struct ia64_psr *)&imm24;
   9.284  	// interrupt flag
   9.285  	if (imm.i)
   9.286 -	    vcpu->vcpu_info->evtchn_upcall_mask = 1;
   9.287 -	if (imm.ic)  PSCB(vcpu,interrupt_collection_enabled) = 0;
   9.288 +		vcpu->vcpu_info->evtchn_upcall_mask = 1;
   9.289 +	if (imm.ic)
   9.290 +		PSCB(vcpu, interrupt_collection_enabled) = 0;
   9.291  	// interrupt collection flag
   9.292  	//if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
   9.293  	// just handle psr.up and psr.pp for now
   9.294 -	if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
   9.295 -		| IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
   9.296 -		| IA64_PSR_DFL | IA64_PSR_DFH))
   9.297 -			return (IA64_ILLOP_FAULT);
   9.298 -	if (imm.dfh) ipsr->dfh = 0;
   9.299 -	if (imm.dfl) ipsr->dfl = 0;
   9.300 +	if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP |
   9.301 +		      IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT |
   9.302 +		      IA64_PSR_DFL | IA64_PSR_DFH))
   9.303 +		return IA64_ILLOP_FAULT;
   9.304 +	if (imm.dfh)
   9.305 +		ipsr->dfh = 0;
   9.306 +	if (imm.dfl)
   9.307 +		ipsr->dfl = 0;
   9.308  	if (imm.pp) {
   9.309  		ipsr->pp = 1;
   9.310  		psr.pp = 1;	// priv perf ctrs always enabled
   9.311 -		PSCB(vcpu,vpsr_pp) = 0;	// but fool the domain if it gets psr
   9.312 +		PSCB(vcpu, vpsr_pp) = 0; // but fool the domain if it gets psr
   9.313 +	}
   9.314 +	if (imm.up) {
   9.315 +		ipsr->up = 0;
   9.316 +		psr.up = 0;
   9.317  	}
   9.318 -	if (imm.up) { ipsr->up = 0; psr.up = 0; }
   9.319 -	if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
   9.320 -	if (imm.be) ipsr->be = 0;
   9.321 -	if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
   9.322 -	__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
   9.323 -	return IA64_NO_FAULT;
   9.324 -}
   9.325 -
   9.326 -
   9.327 -IA64FAULT vcpu_set_psr_dt(VCPU *vcpu)
   9.328 -{
   9.329 -	vcpu_set_metaphysical_mode(vcpu,FALSE);
   9.330 +	if (imm.sp) {
   9.331 +		ipsr->sp = 0;
   9.332 +		psr.sp = 0;
   9.333 +	}
   9.334 +	if (imm.be)
   9.335 +		ipsr->be = 0;
   9.336 +	if (imm.dt)
   9.337 +		vcpu_set_metaphysical_mode(vcpu, TRUE);
   9.338 +	__asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
   9.339  	return IA64_NO_FAULT;
   9.340  }
   9.341  
   9.342 -IA64FAULT vcpu_set_psr_i(VCPU *vcpu)
   9.343 +IA64FAULT vcpu_set_psr_dt(VCPU * vcpu)
   9.344  {
   9.345 -	vcpu->vcpu_info->evtchn_upcall_mask = 0;
   9.346 -	PSCB(vcpu,interrupt_collection_enabled) = 1;
   9.347 +	vcpu_set_metaphysical_mode(vcpu, FALSE);
   9.348  	return IA64_NO_FAULT;
   9.349  }
   9.350  
   9.351 -IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
   9.352 +IA64FAULT vcpu_set_psr_i(VCPU * vcpu)
   9.353 +{
   9.354 +	vcpu->vcpu_info->evtchn_upcall_mask = 0;
   9.355 +	PSCB(vcpu, interrupt_collection_enabled) = 1;
   9.356 +	return IA64_NO_FAULT;
   9.357 +}
   9.358 +
   9.359 +IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm24)
   9.360  {
   9.361  	struct ia64_psr psr, imm, *ipsr;
   9.362  	REGS *regs = vcpu_regs(vcpu);
   9.363 -	UINT64 mask, enabling_interrupts = 0;
   9.364 +	u64 mask, enabling_interrupts = 0;
   9.365  
   9.366  	//PRIVOP_COUNT_ADDR(regs,_SSM);
   9.367  	// TODO: All of these bits need to be virtualized
   9.368 -	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
   9.369 +	__asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
   9.370  	imm = *(struct ia64_psr *)&imm24;
   9.371  	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
   9.372  	// just handle psr.sp,pp and psr.i,ic (and user mask) for now
   9.373 -	mask = IA64_PSR_PP|IA64_PSR_SP|IA64_PSR_I|IA64_PSR_IC|IA64_PSR_UM |
   9.374 -		IA64_PSR_DT|IA64_PSR_DFL|IA64_PSR_DFH;
   9.375 -	if (imm24 & ~mask) return (IA64_ILLOP_FAULT);
   9.376 -	if (imm.dfh) ipsr->dfh = 1;
   9.377 -	if (imm.dfl) ipsr->dfl = 1;
   9.378 +	mask =
   9.379 +	    IA64_PSR_PP | IA64_PSR_SP | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_UM |
   9.380 +	    IA64_PSR_DT | IA64_PSR_DFL | IA64_PSR_DFH;
   9.381 +	if (imm24 & ~mask)
   9.382 +		return IA64_ILLOP_FAULT;
   9.383 +	if (imm.dfh)
   9.384 +		ipsr->dfh = 1;
   9.385 +	if (imm.dfl)
   9.386 +		ipsr->dfl = 1;
   9.387  	if (imm.pp) {
   9.388  		ipsr->pp = 1;
   9.389  		psr.pp = 1;
   9.390 -		PSCB(vcpu,vpsr_pp) = 1;
   9.391 +		PSCB(vcpu, vpsr_pp) = 1;
   9.392  	}
   9.393 -	if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
   9.394 +	if (imm.sp) {
   9.395 +		ipsr->sp = 1;
   9.396 +		psr.sp = 1;
   9.397 +	}
   9.398  	if (imm.i) {
   9.399  		if (vcpu->vcpu_info->evtchn_upcall_mask) {
   9.400  //printf("vcpu_set_psr_sm: psr.ic 0->1\n");
   9.401 @@ -311,114 +342,169 @@ IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UI
   9.402  		}
   9.403  		vcpu->vcpu_info->evtchn_upcall_mask = 0;
   9.404  	}
   9.405 -	if (imm.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
   9.406 +	if (imm.ic)
   9.407 +		PSCB(vcpu, interrupt_collection_enabled) = 1;
   9.408  	// TODO: do this faster
   9.409 -	if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
   9.410 -	if (imm.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
   9.411 -	if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
   9.412 -	if (imm.up) { ipsr->up = 1; psr.up = 1; }
   9.413 +	if (imm.mfl) {
   9.414 +		ipsr->mfl = 1;
   9.415 +		psr.mfl = 1;
   9.416 +	}
   9.417 +	if (imm.mfh) {
   9.418 +		ipsr->mfh = 1;
   9.419 +		psr.mfh = 1;
   9.420 +	}
   9.421 +	if (imm.ac) {
   9.422 +		ipsr->ac = 1;
   9.423 +		psr.ac = 1;
   9.424 +	}
   9.425 +	if (imm.up) {
   9.426 +		ipsr->up = 1;
   9.427 +		psr.up = 1;
   9.428 +	}
   9.429  	if (imm.be) {
   9.430  		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
   9.431 -		return (IA64_ILLOP_FAULT);
   9.432 +		return IA64_ILLOP_FAULT;
   9.433  	}
   9.434 -	if (imm.dt) vcpu_set_metaphysical_mode(vcpu,FALSE);
   9.435 -	__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
   9.436 +	if (imm.dt)
   9.437 +		vcpu_set_metaphysical_mode(vcpu, FALSE);
   9.438 +	__asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
   9.439  	if (enabling_interrupts &&
   9.440 -		vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
   9.441 -			PSCB(vcpu,pending_interruption) = 1;
   9.442 +	    vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
   9.443 +		PSCB(vcpu, pending_interruption) = 1;
   9.444  	return IA64_NO_FAULT;
   9.445  }
   9.446  
   9.447 -IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
   9.448 +IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u64 val)
   9.449  {
   9.450  	struct ia64_psr psr, newpsr, *ipsr;
   9.451  	REGS *regs = vcpu_regs(vcpu);
   9.452 -	UINT64 enabling_interrupts = 0;
   9.453 +	u64 enabling_interrupts = 0;
   9.454  
   9.455  	// TODO: All of these bits need to be virtualized
   9.456 -	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
   9.457 +	__asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
   9.458  	newpsr = *(struct ia64_psr *)&val;
   9.459  	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
   9.460  	// just handle psr.up and psr.pp for now
   9.461 -	//if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP)) return (IA64_ILLOP_FAULT);
   9.462 +	//if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP))
   9.463 +	//	return IA64_ILLOP_FAULT;
   9.464  	// however trying to set other bits can't be an error as it is in ssm
   9.465 -	if (newpsr.dfh) ipsr->dfh = 1;
   9.466 -	if (newpsr.dfl) ipsr->dfl = 1;
   9.467 +	if (newpsr.dfh)
   9.468 +		ipsr->dfh = 1;
   9.469 +	if (newpsr.dfl)
   9.470 +		ipsr->dfl = 1;
   9.471  	if (newpsr.pp) {
   9.472 -		ipsr->pp = 1; psr.pp = 1;
   9.473 -		PSCB(vcpu,vpsr_pp) = 1;
   9.474 +		ipsr->pp = 1;
   9.475 +		psr.pp = 1;
   9.476 +		PSCB(vcpu, vpsr_pp) = 1;
   9.477 +	} else {
   9.478 +		ipsr->pp = 1;
   9.479 +		psr.pp = 1;
   9.480 +		PSCB(vcpu, vpsr_pp) = 0;
   9.481  	}
   9.482 -	else {
   9.483 -		ipsr->pp = 1; psr.pp = 1;
   9.484 -		PSCB(vcpu,vpsr_pp) = 0;
   9.485 +	if (newpsr.up) {
   9.486 +		ipsr->up = 1;
   9.487 +		psr.up = 1;
   9.488  	}
   9.489 -	if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
   9.490 -	if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
   9.491 +	if (newpsr.sp) {
   9.492 +		ipsr->sp = 1;
   9.493 +		psr.sp = 1;
   9.494 +	}
   9.495  	if (newpsr.i) {
   9.496  		if (vcpu->vcpu_info->evtchn_upcall_mask)
   9.497  			enabling_interrupts = 1;
   9.498  		vcpu->vcpu_info->evtchn_upcall_mask = 0;
   9.499  	}
   9.500 -	if (newpsr.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
   9.501 -	if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
   9.502 -	if (newpsr.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
   9.503 -	if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
   9.504 -	if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
   9.505 -	if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
   9.506 -	else vcpu_set_metaphysical_mode(vcpu,TRUE);
   9.507 +	if (newpsr.ic)
   9.508 +		PSCB(vcpu, interrupt_collection_enabled) = 1;
   9.509 +	if (newpsr.mfl) {
   9.510 +		ipsr->mfl = 1;
   9.511 +		psr.mfl = 1;
   9.512 +	}
   9.513 +	if (newpsr.mfh) {
   9.514 +		ipsr->mfh = 1;
   9.515 +		psr.mfh = 1;
   9.516 +	}
   9.517 +	if (newpsr.ac) {
   9.518 +		ipsr->ac = 1;
   9.519 +		psr.ac = 1;
   9.520 +	}
   9.521 +	if (newpsr.up) {
   9.522 +		ipsr->up = 1;
   9.523 +		psr.up = 1;
   9.524 +	}
   9.525 +	if (newpsr.dt && newpsr.rt)
   9.526 +		vcpu_set_metaphysical_mode(vcpu, FALSE);
   9.527 +	else
   9.528 +		vcpu_set_metaphysical_mode(vcpu, TRUE);
   9.529  	if (newpsr.be) {
   9.530  		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
   9.531 -		return (IA64_ILLOP_FAULT);
   9.532 +		return IA64_ILLOP_FAULT;
   9.533  	}
   9.534  	if (enabling_interrupts &&
   9.535 -		vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
   9.536 -			PSCB(vcpu,pending_interruption) = 1;
   9.537 +	    vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
   9.538 +		PSCB(vcpu, pending_interruption) = 1;
   9.539  	return IA64_NO_FAULT;
   9.540  }
   9.541  
   9.542 -IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval)
   9.543 +IA64FAULT vcpu_get_psr(VCPU * vcpu, u64 * pval)
   9.544  {
   9.545  	REGS *regs = vcpu_regs(vcpu);
   9.546  	struct ia64_psr newpsr;
   9.547  
   9.548  	newpsr = *(struct ia64_psr *)&regs->cr_ipsr;
   9.549 -	if (newpsr.cpl == 2) newpsr.cpl = 0;
   9.550 -	if (!vcpu->vcpu_info->evtchn_upcall_mask) newpsr.i = 1;
   9.551 -	else newpsr.i = 0;
   9.552 -	if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1;
   9.553 -	else newpsr.ic = 0;
   9.554 -	if (PSCB(vcpu,metaphysical_mode)) newpsr.dt = 0;
   9.555 -	else newpsr.dt = 1;
   9.556 -	if (PSCB(vcpu,vpsr_pp)) newpsr.pp = 1;
   9.557 -	else newpsr.pp = 0;
   9.558 +	if (newpsr.cpl == 2)
   9.559 +		newpsr.cpl = 0;
   9.560 +	if (!vcpu->vcpu_info->evtchn_upcall_mask)
   9.561 +		newpsr.i = 1;
   9.562 +	else
   9.563 +		newpsr.i = 0;
   9.564 +	if (PSCB(vcpu, interrupt_collection_enabled))
   9.565 +		newpsr.ic = 1;
   9.566 +	else
   9.567 +		newpsr.ic = 0;
   9.568 +	if (PSCB(vcpu, metaphysical_mode))
   9.569 +		newpsr.dt = 0;
   9.570 +	else
   9.571 +		newpsr.dt = 1;
   9.572 +	if (PSCB(vcpu, vpsr_pp))
   9.573 +		newpsr.pp = 1;
   9.574 +	else
   9.575 +		newpsr.pp = 0;
   9.576  	*pval = *(unsigned long *)&newpsr;
   9.577  	return IA64_NO_FAULT;
   9.578  }
   9.579  
   9.580 -BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
   9.581 +BOOLEAN vcpu_get_psr_ic(VCPU * vcpu)
   9.582  {
   9.583 -	return !!PSCB(vcpu,interrupt_collection_enabled);
   9.584 +	return !!PSCB(vcpu, interrupt_collection_enabled);
   9.585  }
   9.586  
   9.587 -BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
   9.588 +BOOLEAN vcpu_get_psr_i(VCPU * vcpu)
   9.589  {
   9.590  	return !vcpu->vcpu_info->evtchn_upcall_mask;
   9.591  }
   9.592  
   9.593 -UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
   9.594 +u64 vcpu_get_ipsr_int_state(VCPU * vcpu, u64 prevpsr)
   9.595  {
   9.596 -	UINT64 dcr = PSCBX(vcpu,dcr);
   9.597 +	u64 dcr = PSCBX(vcpu, dcr);
   9.598  	PSR psr;
   9.599  
   9.600  	//printf("*** vcpu_get_ipsr_int_state (0x%016lx)...\n",prevpsr);
   9.601  	psr.i64 = prevpsr;
   9.602 -	psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
   9.603 -	psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
   9.604 -	psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled);
   9.605 +	psr.ia64_psr.be = 0;
   9.606 +	if (dcr & IA64_DCR_BE)
   9.607 +		psr.ia64_psr.be = 1;
   9.608 +	psr.ia64_psr.pp = 0;
   9.609 +	if (dcr & IA64_DCR_PP)
   9.610 +		psr.ia64_psr.pp = 1;
   9.611 +	psr.ia64_psr.ic = PSCB(vcpu, interrupt_collection_enabled);
   9.612  	psr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;
   9.613 -	psr.ia64_psr.bn = PSCB(vcpu,banknum);
   9.614 -	psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
   9.615 -	if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
   9.616 +	psr.ia64_psr.bn = PSCB(vcpu, banknum);
   9.617 +	psr.ia64_psr.dt = 1;
   9.618 +	psr.ia64_psr.it = 1;
   9.619 +	psr.ia64_psr.rt = 1;
   9.620 +	if (psr.ia64_psr.cpl == 2)
   9.621 +		psr.ia64_psr.cpl = 0;	// !!!! fool domain
   9.622  	// psr.pk = 1;
   9.623  	//printf("returns 0x%016lx...\n",psr.i64);
   9.624  	return psr.i64;
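
The PSR union declared near the top of vcpu.c (struct ia64_psr overlaid on an unsigned long) is what lets vcpu_get_psr() and vcpu_get_ipsr_int_state() edit individual bits by name and then hand back the raw 64-bit value. Here is a compilable sketch of that pattern with a deliberately simplified bitfield layout; the toy field positions and widths are not the architected PSR bit numbers, and only a handful of the real fields are modelled.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for struct ia64_psr: the real structure has many more
 * fields at fixed architected bit positions. */
struct toy_ia64_psr {
	uint64_t be:1;
	uint64_t up:1;
	uint64_t ac:1;
	uint64_t _pad0:10;
	uint64_t ic:1;
	uint64_t i:1;
	uint64_t _pad1:29;
	uint64_t bn:1;
	uint64_t _pad2:19;
};

typedef union {				/* same shape as the PSR union in vcpu.c */
	struct toy_ia64_psr psr;
	uint64_t i64;
} TOY_PSR;

/* Roughly what vcpu_get_ipsr_int_state() does: start from the previous psr
 * value, overwrite the virtualized bits by name, return the raw u64. */
static uint64_t toy_int_state(uint64_t prevpsr, int ic_enabled, int upcall_masked)
{
	TOY_PSR p;

	p.i64 = prevpsr;
	p.psr.ic = ic_enabled;		/* interrupt collection state */
	p.psr.i = !upcall_masked;	/* guest psr.i tracks the event mask */
	p.psr.bn = 1;			/* stands in for PSCB(vcpu, banknum) */
	return p.i64;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)toy_int_state(0x4, 1, 0));
	return 0;
}
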
   9.625 @@ -428,223 +514,227 @@ UINT64 vcpu_get_ipsr_int_state(VCPU *vcp
   9.626   VCPU control register access routines
   9.627  **************************************************************************/
   9.628  
   9.629 -IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
   9.630 +IA64FAULT vcpu_get_dcr(VCPU * vcpu, u64 * pval)
   9.631  {
   9.632  //verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
   9.633  	// Reads of cr.dcr on Xen always have the sign bit set, so
   9.634  	// a domain can differentiate whether it is running on SP or not
   9.635 -	*pval = PSCBX(vcpu,dcr) | 0x8000000000000000L;
   9.636 -	return (IA64_NO_FAULT);
   9.637 +	*pval = PSCBX(vcpu, dcr) | 0x8000000000000000L;
   9.638 +	return IA64_NO_FAULT;
   9.639  }
   9.640  
   9.641 -IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
   9.642 +IA64FAULT vcpu_get_iva(VCPU * vcpu, u64 * pval)
   9.643  {
   9.644 -    if(VMX_DOMAIN(vcpu)){
   9.645 -    	*pval = PSCB(vcpu,iva) & ~0x7fffL;
   9.646 -    }else{
   9.647 -        *pval = PSCBX(vcpu,iva) & ~0x7fffL;
   9.648 -    }
   9.649 -	return (IA64_NO_FAULT);
   9.650 +	if (VMX_DOMAIN(vcpu))
   9.651 +		*pval = PSCB(vcpu, iva) & ~0x7fffL;
   9.652 +	else
   9.653 +		*pval = PSCBX(vcpu, iva) & ~0x7fffL;
   9.654 +
   9.655 +	return IA64_NO_FAULT;
   9.656  }
   9.657  
   9.658 -IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
   9.659 +IA64FAULT vcpu_get_pta(VCPU * vcpu, u64 * pval)
   9.660  {
   9.661 -	*pval = PSCB(vcpu,pta);
   9.662 -	return (IA64_NO_FAULT);
   9.663 +	*pval = PSCB(vcpu, pta);
   9.664 +	return IA64_NO_FAULT;
   9.665  }
   9.666  
   9.667 -IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
   9.668 +IA64FAULT vcpu_get_ipsr(VCPU * vcpu, u64 * pval)
   9.669  {
   9.670  	//REGS *regs = vcpu_regs(vcpu);
   9.671  	//*pval = regs->cr_ipsr;
   9.672 -	*pval = PSCB(vcpu,ipsr);
   9.673 -	return (IA64_NO_FAULT);
   9.674 +	*pval = PSCB(vcpu, ipsr);
   9.675 +	return IA64_NO_FAULT;
   9.676  }
   9.677  
   9.678 -IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
   9.679 +IA64FAULT vcpu_get_isr(VCPU * vcpu, u64 * pval)
   9.680  {
   9.681 -	*pval = PSCB(vcpu,isr);
   9.682 -	return (IA64_NO_FAULT);
   9.683 +	*pval = PSCB(vcpu, isr);
   9.684 +	return IA64_NO_FAULT;
   9.685  }
   9.686  
   9.687 -IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
   9.688 +IA64FAULT vcpu_get_iip(VCPU * vcpu, u64 * pval)
   9.689  {
   9.690  	//REGS *regs = vcpu_regs(vcpu);
   9.691  	//*pval = regs->cr_iip;
   9.692 -	*pval = PSCB(vcpu,iip);
   9.693 -	return (IA64_NO_FAULT);
   9.694 +	*pval = PSCB(vcpu, iip);
   9.695 +	return IA64_NO_FAULT;
   9.696  }
   9.697  
   9.698 -IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
   9.699 +IA64FAULT vcpu_get_ifa(VCPU * vcpu, u64 * pval)
   9.700  {
   9.701  	PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_get_ifa);
   9.702 -	*pval = PSCB(vcpu,ifa);
   9.703 -	return (IA64_NO_FAULT);
   9.704 +	*pval = PSCB(vcpu, ifa);
   9.705 +	return IA64_NO_FAULT;
   9.706  }
   9.707  
   9.708 -unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
   9.709 +unsigned long vcpu_get_rr_ps(VCPU * vcpu, u64 vadr)
   9.710  {
   9.711  	ia64_rr rr;
   9.712  
   9.713 -	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
   9.714 -	return(rr.ps);
   9.715 +	rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
   9.716 +	return rr.ps;
   9.717  }
   9.718  
   9.719 -unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
   9.720 +unsigned long vcpu_get_rr_rid(VCPU * vcpu, u64 vadr)
   9.721  {
   9.722  	ia64_rr rr;
   9.723  
   9.724 -	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
   9.725 -	return(rr.rid);
   9.726 +	rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
   9.727 +	return rr.rid;
   9.728  }
   9.729  
   9.730 -unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
   9.731 +unsigned long vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa)
   9.732  {
   9.733  	ia64_rr rr;
   9.734  
   9.735  	rr.rrval = 0;
   9.736 -	rr.ps = vcpu_get_rr_ps(vcpu,ifa);
   9.737 -	rr.rid = vcpu_get_rr_rid(vcpu,ifa);
   9.738 -	return (rr.rrval);
   9.739 +	rr.ps = vcpu_get_rr_ps(vcpu, ifa);
   9.740 +	rr.rid = vcpu_get_rr_rid(vcpu, ifa);
   9.741 +	return rr.rrval;
   9.742  }
   9.743  
   9.744 -
   9.745 -IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
   9.746 +IA64FAULT vcpu_get_itir(VCPU * vcpu, u64 * pval)
   9.747  {
   9.748 -	UINT64 val = PSCB(vcpu,itir);
   9.749 +	u64 val = PSCB(vcpu, itir);
   9.750  	*pval = val;
   9.751 -	return (IA64_NO_FAULT);
   9.752 +	return IA64_NO_FAULT;
   9.753  }
   9.754  
   9.755 -IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
   9.756 +IA64FAULT vcpu_get_iipa(VCPU * vcpu, u64 * pval)
   9.757  {
   9.758 -	UINT64 val = PSCB(vcpu,iipa);
   9.759 +	u64 val = PSCB(vcpu, iipa);
   9.760  	// SP entry code does not save iipa yet nor does it get
   9.761  	//  properly delivered in the pscb
   9.762  //	printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
   9.763  	*pval = val;
   9.764 -	return (IA64_NO_FAULT);
   9.765 +	return IA64_NO_FAULT;
   9.766  }
   9.767  
   9.768 -IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
   9.769 +IA64FAULT vcpu_get_ifs(VCPU * vcpu, u64 * pval)
   9.770  {
   9.771  	//PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
   9.772  	//*pval = PSCB(vcpu,regs).cr_ifs;
   9.773 -	*pval = PSCB(vcpu,ifs);
   9.774 -	PSCB(vcpu,incomplete_regframe) = 0;
   9.775 -	return (IA64_NO_FAULT);
   9.776 +	*pval = PSCB(vcpu, ifs);
   9.777 +	PSCB(vcpu, incomplete_regframe) = 0;
   9.778 +	return IA64_NO_FAULT;
   9.779  }
   9.780  
   9.781 -IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
   9.782 +IA64FAULT vcpu_get_iim(VCPU * vcpu, u64 * pval)
   9.783  {
   9.784 -	UINT64 val = PSCB(vcpu,iim);
   9.785 +	u64 val = PSCB(vcpu, iim);
   9.786  	*pval = val;
   9.787 -	return (IA64_NO_FAULT);
   9.788 +	return IA64_NO_FAULT;
   9.789  }
   9.790  
   9.791 -IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
   9.792 +IA64FAULT vcpu_get_iha(VCPU * vcpu, u64 * pval)
   9.793  {
   9.794  	PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_thash);
   9.795 -	*pval = PSCB(vcpu,iha);
   9.796 -	return (IA64_NO_FAULT);
   9.797 +	*pval = PSCB(vcpu, iha);
   9.798 +	return IA64_NO_FAULT;
   9.799  }
   9.800  
   9.801 -IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
   9.802 +IA64FAULT vcpu_set_dcr(VCPU * vcpu, u64 val)
   9.803  {
   9.804  	// Reads of cr.dcr on SP always have the sign bit set, so
   9.805  	// a domain can differentiate whether it is running on SP or not
   9.806  	// Thus, writes of DCR should ignore the sign bit
   9.807  //verbose("vcpu_set_dcr: called\n");
   9.808 -	PSCBX(vcpu,dcr) = val & ~0x8000000000000000L;
   9.809 -	return (IA64_NO_FAULT);
   9.810 +	PSCBX(vcpu, dcr) = val & ~0x8000000000000000L;
   9.811 +	return IA64_NO_FAULT;
   9.812  }
   9.813  
   9.814 -IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
   9.815 +IA64FAULT vcpu_set_iva(VCPU * vcpu, u64 val)
   9.816  {
   9.817 -    if(VMX_DOMAIN(vcpu)){
   9.818 -    	PSCB(vcpu,iva) = val & ~0x7fffL;
   9.819 -    }else{
   9.820 -        PSCBX(vcpu,iva) = val & ~0x7fffL;
   9.821 -    }
   9.822 -	return (IA64_NO_FAULT);
   9.823 +	if (VMX_DOMAIN(vcpu))
   9.824 +		PSCB(vcpu, iva) = val & ~0x7fffL;
   9.825 +	else
   9.826 +		PSCBX(vcpu, iva) = val & ~0x7fffL;
   9.827 +
   9.828 +	return IA64_NO_FAULT;
   9.829  }
   9.830  
   9.831 -IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val)
   9.832 +IA64FAULT vcpu_set_pta(VCPU * vcpu, u64 val)
   9.833  {
   9.834  	if (val & IA64_PTA_LFMT) {
   9.835  		printf("*** No support for VHPT long format yet!!\n");
   9.836 -		return (IA64_ILLOP_FAULT);
   9.837 +		return IA64_ILLOP_FAULT;
   9.838  	}
   9.839 -	if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
   9.840 -	if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
   9.841 -	PSCB(vcpu,pta) = val;
   9.842 +	if (val & (0x3f << 9))	/* reserved fields */
   9.843 +		return IA64_RSVDREG_FAULT;
   9.844 +	if (val & 2)		/* reserved fields */
   9.845 +		return IA64_RSVDREG_FAULT;
   9.846 +	PSCB(vcpu, pta) = val;
   9.847  	return IA64_NO_FAULT;
   9.848  }
   9.849  
   9.850 -IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
   9.851 +IA64FAULT vcpu_set_ipsr(VCPU * vcpu, u64 val)
   9.852  {
   9.853 -	PSCB(vcpu,ipsr) = val;
   9.854 +	PSCB(vcpu, ipsr) = val;
   9.855  	return IA64_NO_FAULT;
   9.856  }
   9.857  
   9.858 -IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
   9.859 +IA64FAULT vcpu_set_isr(VCPU * vcpu, u64 val)
   9.860  {
   9.861 -	PSCB(vcpu,isr) = val;
   9.862 +	PSCB(vcpu, isr) = val;
   9.863  	return IA64_NO_FAULT;
   9.864  }
   9.865  
   9.866 -IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
   9.867 +IA64FAULT vcpu_set_iip(VCPU * vcpu, u64 val)
   9.868  {
   9.869 -	PSCB(vcpu,iip) = val;
   9.870 +	PSCB(vcpu, iip) = val;
   9.871  	return IA64_NO_FAULT;
   9.872  }
   9.873  
   9.874 -IA64FAULT vcpu_increment_iip(VCPU *vcpu)
   9.875 +IA64FAULT vcpu_increment_iip(VCPU * vcpu)
   9.876  {
   9.877  	REGS *regs = vcpu_regs(vcpu);
   9.878  	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
   9.879 -	if (ipsr->ri == 2) { ipsr->ri=0; regs->cr_iip += 16; }
   9.880 -	else ipsr->ri++;
   9.881 -	return (IA64_NO_FAULT);
   9.882 -}
   9.883 -
   9.884 -IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
   9.885 -{
   9.886 -	PSCB(vcpu,ifa) = val;
   9.887 +	if (ipsr->ri == 2) {
   9.888 +		ipsr->ri = 0;
   9.889 +		regs->cr_iip += 16;
   9.890 +	} else
   9.891 +		ipsr->ri++;
   9.892  	return IA64_NO_FAULT;
   9.893  }
   9.894  
   9.895 -IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
   9.896 +IA64FAULT vcpu_set_ifa(VCPU * vcpu, u64 val)
   9.897  {
   9.898 -	PSCB(vcpu,itir) = val;
   9.899 +	PSCB(vcpu, ifa) = val;
   9.900  	return IA64_NO_FAULT;
   9.901  }
   9.902  
   9.903 -IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val)
   9.904 +IA64FAULT vcpu_set_itir(VCPU * vcpu, u64 val)
   9.905 +{
   9.906 +	PSCB(vcpu, itir) = val;
   9.907 +	return IA64_NO_FAULT;
   9.908 +}
   9.909 +
   9.910 +IA64FAULT vcpu_set_iipa(VCPU * vcpu, u64 val)
   9.911  {
   9.912  	// SP entry code does not save iipa yet nor does it get
   9.913  	//  properly delivered in the pscb
   9.914  //	printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
   9.915 -	PSCB(vcpu,iipa) = val;
   9.916 +	PSCB(vcpu, iipa) = val;
   9.917  	return IA64_NO_FAULT;
   9.918  }
   9.919  
   9.920 -IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
   9.921 +IA64FAULT vcpu_set_ifs(VCPU * vcpu, u64 val)
   9.922  {
   9.923  	//REGS *regs = vcpu_regs(vcpu);
   9.924 -	PSCB(vcpu,ifs) = val;
   9.925 +	PSCB(vcpu, ifs) = val;
   9.926  	return IA64_NO_FAULT;
   9.927  }
   9.928  
   9.929 -IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
   9.930 +IA64FAULT vcpu_set_iim(VCPU * vcpu, u64 val)
   9.931  {
   9.932 -	PSCB(vcpu,iim) = val;
   9.933 +	PSCB(vcpu, iim) = val;
   9.934  	return IA64_NO_FAULT;
   9.935  }
   9.936  
   9.937 -IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
   9.938 +IA64FAULT vcpu_set_iha(VCPU * vcpu, u64 val)
   9.939  {
   9.940 -	PSCB(vcpu,iha) = val;
   9.941 +	PSCB(vcpu, iha) = val;
   9.942  	return IA64_NO_FAULT;
   9.943  }
   9.944  
   9.945 @@ -652,12 +742,12 @@ IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT6
   9.946   VCPU interrupt control register access routines
   9.947  **************************************************************************/
   9.948  
   9.949 -void vcpu_pend_unspecified_interrupt(VCPU *vcpu)
   9.950 +void vcpu_pend_unspecified_interrupt(VCPU * vcpu)
   9.951  {
   9.952 -	PSCB(vcpu,pending_interruption) = 1;
   9.953 +	PSCB(vcpu, pending_interruption) = 1;
   9.954  }
   9.955  
   9.956 -void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
   9.957 +void vcpu_pend_interrupt(VCPU * vcpu, u64 vector)
   9.958  {
   9.959  	if (vector & ~0xff) {
   9.960  		printf("vcpu_pend_interrupt: bad vector\n");
   9.961 @@ -665,15 +755,16 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
   9.962  	}
   9.963  
   9.964  	if (vcpu->arch.event_callback_ip) {
   9.965 -		printf("Deprecated interface. Move to new event based solution\n");
   9.966 +		printf("Deprecated interface. Move to new event based "
   9.967 +		       "solution\n");
   9.968  		return;
   9.969  	}
   9.970 -		
   9.971 -	if ( VMX_DOMAIN(vcpu) ) {
   9.972 -		set_bit(vector,VCPU(vcpu,irr));
   9.973 +
   9.974 +	if (VMX_DOMAIN(vcpu)) {
   9.975 +		set_bit(vector, VCPU(vcpu, irr));
   9.976  	} else {
   9.977 -		set_bit(vector,PSCBX(vcpu,irr));
   9.978 -		PSCB(vcpu,pending_interruption) = 1;
   9.979 +		set_bit(vector, PSCBX(vcpu, irr));
   9.980 +		PSCB(vcpu, pending_interruption) = 1;
   9.981  	}
   9.982  }
   9.983  
   9.984 @@ -686,9 +777,9 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
   9.985   * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
   9.986   * this routine also ignores pscb.interrupt_delivery_enabled
   9.987   * and this must be checked independently; see vcpu_deliverable interrupts() */
   9.988 -UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
   9.989 +u64 vcpu_check_pending_interrupts(VCPU * vcpu)
   9.990  {
   9.991 -	UINT64 *p, *r, bits, bitnum, mask, i, vector;
   9.992 +	u64 *p, *r, bits, bitnum, mask, i, vector;
   9.993  
   9.994  	if (vcpu->arch.event_callback_ip)
   9.995  		return SPURIOUS_VECTOR;
   9.996 @@ -697,38 +788,41 @@ UINT64 vcpu_check_pending_interrupts(VCP
   9.997  	 * event injection without handle. Later guest may throw out
   9.998  	 * the event itself.
   9.999  	 */
  9.1000 -check_start:
  9.1001 -	if (event_pending(vcpu) && 
  9.1002 -		!test_bit(vcpu->domain->shared_info->arch.evtchn_vector,
  9.1003 -			&PSCBX(vcpu, insvc[0])))
  9.1004 -		vcpu_pend_interrupt(vcpu, vcpu->domain->shared_info->arch.evtchn_vector);
  9.1005 + check_start:
  9.1006 +	if (event_pending(vcpu) &&
  9.1007 +	    !test_bit(vcpu->domain->shared_info->arch.evtchn_vector,
  9.1008 +		      &PSCBX(vcpu, insvc[0])))
  9.1009 +		vcpu_pend_interrupt(vcpu,
  9.1010 +		                    vcpu->domain->shared_info->arch.
  9.1011 +		                    evtchn_vector);
  9.1012  
  9.1013 -	p = &PSCBX(vcpu,irr[3]);
  9.1014 -	r = &PSCBX(vcpu,insvc[3]);
  9.1015 -	for (i = 3; ; p--, r--, i--) {
  9.1016 -		bits = *p ;
  9.1017 -		if (bits) break; // got a potential interrupt
  9.1018 +	p = &PSCBX(vcpu, irr[3]);
  9.1019 +	r = &PSCBX(vcpu, insvc[3]);
  9.1020 +	for (i = 3 ;; p--, r--, i--) {
  9.1021 +		bits = *p;
  9.1022 +		if (bits)
  9.1023 +			break;	// got a potential interrupt
  9.1024  		if (*r) {
  9.1025  			// nothing in this word which is pending+inservice
  9.1026  			// but there is one inservice which masks lower
  9.1027  			return SPURIOUS_VECTOR;
  9.1028  		}
  9.1029  		if (i == 0) {
  9.1030 -		// checked all bits... nothing pending+inservice
  9.1031 +			// checked all bits... nothing pending+inservice
  9.1032  			return SPURIOUS_VECTOR;
  9.1033  		}
  9.1034  	}
  9.1035  	// have a pending,deliverable interrupt... see if it is masked
  9.1036  	bitnum = ia64_fls(bits);
  9.1037  //printf("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...\n",bitnum);
  9.1038 -	vector = bitnum+(i*64);
  9.1039 +	vector = bitnum + (i * 64);
  9.1040  	mask = 1L << bitnum;
  9.1041  	/* sanity check for guest timer interrupt */
  9.1042 -	if (vector == (PSCB(vcpu,itv) & 0xff)) {
  9.1043 +	if (vector == (PSCB(vcpu, itv) & 0xff)) {
  9.1044  		uint64_t now = ia64_get_itc();
  9.1045 -		if (now < PSCBX(vcpu,domain_itm)) {
  9.1046 +		if (now < PSCBX(vcpu, domain_itm)) {
  9.1047  //			printk("Ooops, pending guest timer before its due\n");
  9.1048 -			PSCBX(vcpu,irr[i]) &= ~mask;
  9.1049 +			PSCBX(vcpu, irr[i]) &= ~mask;
  9.1050  			goto check_start;
  9.1051  		}
  9.1052  	}
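
vcpu_check_pending_interrupts() above treats irr[] and insvc[] as 256-bit maps split over four 64-bit words and scans them from word 3 down: the first non-zero irr word carries the highest pending vector (ia64_fls() of that word plus i*64), while a non-zero insvc word met before any pending bit means a higher-priority interrupt is already in service. The standalone sketch below models just that scan; it deliberately omits the same-word in-service test and the tpr.mmi/tpr.mic masking that follow in the real code, and the toy_* names and the SPURIOUS value are stand-ins.

#include <stdio.h>
#include <stdint.h>

#define TOY_SPURIOUS 15UL	/* stand-in only, not the real SPURIOUS_VECTOR */

static unsigned long toy_fls(uint64_t x)	/* stand-in for ia64_fls(); x != 0 */
{
	return 63 - __builtin_clzll(x);
}

/* Walk the four words from the top.  The first non-zero irr word holds the
 * highest pending vector; a non-zero insvc word found before any pending bit
 * means something of higher priority is already being serviced. */
static unsigned long toy_highest_pending(const uint64_t irr[4],
					 const uint64_t insvc[4])
{
	int i;

	for (i = 3; i >= 0; i--) {
		if (irr[i])
			return toy_fls(irr[i]) + i * 64;
		if (insvc[i])
			return TOY_SPURIOUS;
	}
	return TOY_SPURIOUS;
}

int main(void)
{
	uint64_t irr[4] = { 0, 1UL << 7, 0, 0 };	/* vector 71 pending */
	uint64_t insvc[4] = { 0, 0, 0, 0 };

	printf("pending: %lu\n", toy_highest_pending(irr, insvc));
	insvc[2] = 1;					/* vector 128 in service */
	printf("masked : %lu\n", toy_highest_pending(irr, insvc));
	return 0;
}
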
  9.1053 @@ -738,48 +832,47 @@ check_start:
  9.1054  //printf("but masked by equal inservice\n");
  9.1055  		return SPURIOUS_VECTOR;
  9.1056  	}
  9.1057 -	if (PSCB(vcpu,tpr) & IA64_TPR_MMI) {
  9.1058 +	if (PSCB(vcpu, tpr) & IA64_TPR_MMI) {
  9.1059  		// tpr.mmi is set
  9.1060  //printf("but masked by tpr.mmi\n");
  9.1061  		return SPURIOUS_VECTOR;
  9.1062  	}
  9.1063 -	if (((PSCB(vcpu,tpr) & IA64_TPR_MIC) + 15) >= vector) {
  9.1064 +	if (((PSCB(vcpu, tpr) & IA64_TPR_MIC) + 15) >= vector) {
  9.1065  		//tpr.mic masks class
  9.1066  //printf("but masked by tpr.mic\n");
  9.1067  		return SPURIOUS_VECTOR;
  9.1068  	}
  9.1069 -
  9.1070  //printf("returned to caller\n");
  9.1071  	return vector;
  9.1072  }
  9.1073  
  9.1074 -UINT64 vcpu_deliverable_interrupts(VCPU *vcpu)
  9.1075 +u64 vcpu_deliverable_interrupts(VCPU * vcpu)
  9.1076  {
  9.1077  	return (vcpu_get_psr_i(vcpu) &&
  9.1078  		vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
  9.1079  }
  9.1080  
  9.1081 -UINT64 vcpu_deliverable_timer(VCPU *vcpu)
  9.1082 +u64 vcpu_deliverable_timer(VCPU * vcpu)
  9.1083  {
  9.1084  	return (vcpu_get_psr_i(vcpu) &&
  9.1085 -		vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu,itv));
  9.1086 +		vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu, itv));
  9.1087  }
  9.1088  
  9.1089 -IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
  9.1090 +IA64FAULT vcpu_get_lid(VCPU * vcpu, u64 * pval)
  9.1091  {
  9.1092  	/* Use EID=0, ID=vcpu_id.  */
  9.1093  	*pval = vcpu->vcpu_id << 24;
  9.1094  	return IA64_NO_FAULT;
  9.1095  }
  9.1096  
  9.1097 -IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
  9.1098 +IA64FAULT vcpu_get_ivr(VCPU * vcpu, u64 * pval)
  9.1099  {
  9.1100  	int i;
  9.1101 -	UINT64 vector, mask;
  9.1102 +	u64 vector, mask;
  9.1103  
  9.1104  #define HEARTBEAT_FREQ 16	// period in seconds
  9.1105  #ifdef HEARTBEAT_FREQ
  9.1106 -#define N_DOMS 16	// period in seconds
   9.1107 +#define N_DOMS 16		// number of domains
  9.1108  #if 0
  9.1109  	static long count[N_DOMS] = { 0 };
  9.1110  #endif
  9.1111 @@ -791,257 +884,269 @@ IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT6
  9.1112  	static char firsttime[256];
  9.1113  	if (firstivr) {
  9.1114  		int i;
  9.1115 -		for (i=0;i<256;i++) firsttime[i]=1;
  9.1116 -		firstivr=0;
  9.1117 +		for (i = 0; i < 256; i++)
  9.1118 +			firsttime[i] = 1;
  9.1119 +		firstivr = 0;
  9.1120  	}
  9.1121  #endif
  9.1122  
  9.1123  	vector = vcpu_check_pending_interrupts(vcpu);
  9.1124  	if (vector == SPURIOUS_VECTOR) {
  9.1125 -		PSCB(vcpu,pending_interruption) = 0;
  9.1126 +		PSCB(vcpu, pending_interruption) = 0;
  9.1127  		*pval = vector;
  9.1128  		return IA64_NO_FAULT;
  9.1129  	}
  9.1130  #ifdef HEARTBEAT_FREQ
  9.1131 -	if (domid >= N_DOMS) domid = N_DOMS-1;
  9.1132 +	if (domid >= N_DOMS)
  9.1133 +		domid = N_DOMS - 1;
  9.1134  #if 0
  9.1135 -	if (vector == (PSCB(vcpu,itv) & 0xff)) {
  9.1136 -	    if (!(++count[domid] & ((HEARTBEAT_FREQ*1024)-1))) {
  9.1137 -		printf("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
  9.1138 -			domid, count[domid], nonclockcount[domid]);
  9.1139 -		//count[domid] = 0;
  9.1140 -		//dump_runq();
  9.1141 -	    }
  9.1142 +	if (vector == (PSCB(vcpu, itv) & 0xff)) {
  9.1143 +		if (!(++count[domid] & ((HEARTBEAT_FREQ * 1024) - 1))) {
  9.1144 +			printf("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
  9.1145 +			       domid, count[domid], nonclockcount[domid]);
  9.1146 +			//count[domid] = 0;
  9.1147 +			//dump_runq();
  9.1148 +		}
  9.1149  	}
  9.1150  #endif
  9.1151 -	else nonclockcount[domid]++;
  9.1152 +	else
  9.1153 +		nonclockcount[domid]++;
  9.1154  #endif
  9.1155  	// now have an unmasked, pending, deliverable vector!
  9.1156  	// getting ivr has "side effects"
  9.1157  #ifdef IRQ_DEBUG
  9.1158  	if (firsttime[vector]) {
  9.1159  		printf("*** First get_ivr on vector=%lu,itc=%lx\n",
  9.1160 -			vector,ia64_get_itc());
  9.1161 -		firsttime[vector]=0;
  9.1162 +		       vector, ia64_get_itc());
  9.1163 +		firsttime[vector] = 0;
  9.1164  	}
  9.1165  #endif
  9.1166  	/* if delivering a timer interrupt, remember domain_itm, which
  9.1167  	 * needs to be done before clearing irr
  9.1168  	 */
  9.1169 -	if (vector == (PSCB(vcpu,itv) & 0xff)) {
  9.1170 -		PSCBX(vcpu,domain_itm_last) = PSCBX(vcpu,domain_itm);
  9.1171 +	if (vector == (PSCB(vcpu, itv) & 0xff)) {
  9.1172 +		PSCBX(vcpu, domain_itm_last) = PSCBX(vcpu, domain_itm);
  9.1173  	}
  9.1174  
  9.1175  	i = vector >> 6;
  9.1176  	mask = 1L << (vector & 0x3f);
  9.1177  //printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %lu\n",vector);
  9.1178 -	PSCBX(vcpu,insvc[i]) |= mask;
  9.1179 -	PSCBX(vcpu,irr[i]) &= ~mask;
  9.1180 +	PSCBX(vcpu, insvc[i]) |= mask;
  9.1181 +	PSCBX(vcpu, irr[i]) &= ~mask;
  9.1182  	//PSCB(vcpu,pending_interruption)--;
  9.1183  	*pval = vector;
  9.1184  	return IA64_NO_FAULT;
  9.1185  }
  9.1186  
  9.1187 -IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
  9.1188 +IA64FAULT vcpu_get_tpr(VCPU * vcpu, u64 * pval)
  9.1189  {
  9.1190 -	*pval = PSCB(vcpu,tpr);
  9.1191 -	return (IA64_NO_FAULT);
  9.1192 +	*pval = PSCB(vcpu, tpr);
  9.1193 +	return IA64_NO_FAULT;
  9.1194  }
  9.1195  
  9.1196 -IA64FAULT vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
  9.1197 +IA64FAULT vcpu_get_eoi(VCPU * vcpu, u64 * pval)
  9.1198  {
  9.1199 -	*pval = 0L;  // reads of eoi always return 0
  9.1200 -	return (IA64_NO_FAULT);
  9.1201 +	*pval = 0L;		// reads of eoi always return 0
  9.1202 +	return IA64_NO_FAULT;
  9.1203  }
  9.1204  
  9.1205 -IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
  9.1206 +IA64FAULT vcpu_get_irr0(VCPU * vcpu, u64 * pval)
  9.1207  {
  9.1208  	*pval = PSCBX(vcpu, irr[0]);
  9.1209 -	return (IA64_NO_FAULT);
  9.1210 +	return IA64_NO_FAULT;
  9.1211  }
  9.1212  
  9.1213 -IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
  9.1214 +IA64FAULT vcpu_get_irr1(VCPU * vcpu, u64 * pval)
  9.1215  {
  9.1216  	*pval = PSCBX(vcpu, irr[1]);
  9.1217 -	return (IA64_NO_FAULT);
  9.1218 +	return IA64_NO_FAULT;
  9.1219  }
  9.1220  
  9.1221 -IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
  9.1222 +IA64FAULT vcpu_get_irr2(VCPU * vcpu, u64 * pval)
  9.1223  {
  9.1224  	*pval = PSCBX(vcpu, irr[2]);
  9.1225 -	return (IA64_NO_FAULT);
  9.1226 +	return IA64_NO_FAULT;
  9.1227  }
  9.1228  
  9.1229 -IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
  9.1230 +IA64FAULT vcpu_get_irr3(VCPU * vcpu, u64 * pval)
  9.1231  {
  9.1232  	*pval = PSCBX(vcpu, irr[3]);
  9.1233 -	return (IA64_NO_FAULT);
  9.1234 +	return IA64_NO_FAULT;
  9.1235  }
  9.1236  
  9.1237 -IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
  9.1238 +IA64FAULT vcpu_get_itv(VCPU * vcpu, u64 * pval)
  9.1239  {
  9.1240 -	*pval = PSCB(vcpu,itv);
  9.1241 -	return (IA64_NO_FAULT);
  9.1242 +	*pval = PSCB(vcpu, itv);
  9.1243 +	return IA64_NO_FAULT;
  9.1244  }
  9.1245  
  9.1246 -IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
  9.1247 +IA64FAULT vcpu_get_pmv(VCPU * vcpu, u64 * pval)
  9.1248  {
  9.1249 -	*pval = PSCB(vcpu,pmv);
  9.1250 -	return (IA64_NO_FAULT);
  9.1251 +	*pval = PSCB(vcpu, pmv);
  9.1252 +	return IA64_NO_FAULT;
  9.1253  }
  9.1254  
  9.1255 -IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
  9.1256 +IA64FAULT vcpu_get_cmcv(VCPU * vcpu, u64 * pval)
  9.1257  {
  9.1258 -	*pval = PSCB(vcpu,cmcv);
  9.1259 -	return (IA64_NO_FAULT);
  9.1260 +	*pval = PSCB(vcpu, cmcv);
  9.1261 +	return IA64_NO_FAULT;
  9.1262  }
  9.1263  
  9.1264 -IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
  9.1265 +IA64FAULT vcpu_get_lrr0(VCPU * vcpu, u64 * pval)
  9.1266  {
  9.1267  	// fix this when setting values other than m-bit is supported
  9.1268  	printf("vcpu_get_lrr0: Unmasked interrupts unsupported\n");
  9.1269  	*pval = (1L << 16);
  9.1270 -	return (IA64_NO_FAULT);
  9.1271 +	return IA64_NO_FAULT;
  9.1272  }
  9.1273  
  9.1274 -IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
  9.1275 +IA64FAULT vcpu_get_lrr1(VCPU * vcpu, u64 * pval)
  9.1276  {
  9.1277  	// fix this when setting values other than m-bit is supported
  9.1278  	printf("vcpu_get_lrr1: Unmasked interrupts unsupported\n");
  9.1279  	*pval = (1L << 16);
  9.1280 -	return (IA64_NO_FAULT);
  9.1281 +	return IA64_NO_FAULT;
  9.1282  }
  9.1283  
  9.1284 -IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val)
  9.1285 +IA64FAULT vcpu_set_lid(VCPU * vcpu, u64 val)
  9.1286  {
  9.1287  	printf("vcpu_set_lid: Setting cr.lid is unsupported\n");
  9.1288 -	return (IA64_ILLOP_FAULT);
  9.1289 +	return IA64_ILLOP_FAULT;
  9.1290  }
  9.1291  
  9.1292 -IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
  9.1293 +IA64FAULT vcpu_set_tpr(VCPU * vcpu, u64 val)
  9.1294  {
  9.1295 -	if (val & 0xff00) return IA64_RSVDREG_FAULT;
  9.1296 -	PSCB(vcpu,tpr) = val;
  9.1297 +	if (val & 0xff00)
  9.1298 +		return IA64_RSVDREG_FAULT;
  9.1299 +	PSCB(vcpu, tpr) = val;
  9.1300  	/* This can unmask interrupts.  */
  9.1301  	if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
  9.1302 -		PSCB(vcpu,pending_interruption) = 1;
  9.1303 -	return (IA64_NO_FAULT);
  9.1304 +		PSCB(vcpu, pending_interruption) = 1;
  9.1305 +	return IA64_NO_FAULT;
  9.1306  }
  9.1307  
  9.1308 -IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val)
  9.1309 +IA64FAULT vcpu_set_eoi(VCPU * vcpu, u64 val)
  9.1310  {
  9.1311 -	UINT64 *p, bits, vec, bitnum;
  9.1312 +	u64 *p, bits, vec, bitnum;
  9.1313  	int i;
  9.1314  
  9.1315 -	p = &PSCBX(vcpu,insvc[3]);
  9.1316 -	for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
  9.1317 +	p = &PSCBX(vcpu, insvc[3]);
  9.1318 +	for (i = 3; (i >= 0) && !(bits = *p); i--, p--)
  9.1319 +		;
  9.1320  	if (i < 0) {
  9.1321  		printf("Trying to EOI interrupt when none are in-service.\n");
  9.1322  		return IA64_NO_FAULT;
  9.1323  	}
  9.1324  	bitnum = ia64_fls(bits);
  9.1325 -	vec = bitnum + (i*64);
  9.1326 +	vec = bitnum + (i * 64);
  9.1327  	/* clear the correct bit */
  9.1328  	bits &= ~(1L << bitnum);
  9.1329  	*p = bits;
  9.1330  	/* clearing an eoi bit may unmask another pending interrupt... */
  9.1331 -	if (!vcpu->vcpu_info->evtchn_upcall_mask) { // but only if enabled...
  9.1332 +	if (!vcpu->vcpu_info->evtchn_upcall_mask) {	// but only if enabled...
  9.1333  		// worry about this later... Linux only calls eoi
  9.1334  		// with interrupts disabled
  9.1335  		printf("Trying to EOI interrupt with interrupts enabled\n");
  9.1336  	}
  9.1337  	if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
  9.1338 -		PSCB(vcpu,pending_interruption) = 1;
  9.1339 +		PSCB(vcpu, pending_interruption) = 1;
  9.1340  //printf("YYYYY vcpu_set_eoi: Successful\n");
  9.1341 -	return (IA64_NO_FAULT);
  9.1342 +	return IA64_NO_FAULT;
  9.1343  }
  9.1344  
  9.1345 -IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val)
  9.1346 +IA64FAULT vcpu_set_lrr0(VCPU * vcpu, u64 val)
  9.1347  {
  9.1348  	if (!(val & (1L << 16))) {
  9.1349  		printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
  9.1350 -		return (IA64_ILLOP_FAULT);
  9.1351 +		return IA64_ILLOP_FAULT;
  9.1352  	}
  9.1353  	// no place to save this state but nothing to do anyway
  9.1354 -	return (IA64_NO_FAULT);
  9.1355 +	return IA64_NO_FAULT;
  9.1356  }
  9.1357  
  9.1358 -IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val)
  9.1359 +IA64FAULT vcpu_set_lrr1(VCPU * vcpu, u64 val)
  9.1360  {
  9.1361  	if (!(val & (1L << 16))) {
  9.1362  		printf("vcpu_set_lrr1: Unmasked interrupts unsupported\n");
  9.1363 -		return (IA64_ILLOP_FAULT);
  9.1364 +		return IA64_ILLOP_FAULT;
  9.1365  	}
  9.1366  	// no place to save this state but nothing to do anyway
  9.1367 -	return (IA64_NO_FAULT);
  9.1368 +	return IA64_NO_FAULT;
  9.1369  }
  9.1370  
  9.1371 -IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
  9.1372 +IA64FAULT vcpu_set_itv(VCPU * vcpu, u64 val)
  9.1373  {
  9.1374  	/* Check reserved fields.  */
  9.1375  	if (val & 0xef00)
  9.1376 -		return (IA64_ILLOP_FAULT);
  9.1377 -	PSCB(vcpu,itv) = val;
  9.1378 +		return IA64_ILLOP_FAULT;
  9.1379 +	PSCB(vcpu, itv) = val;
  9.1380  	if (val & 0x10000) {
  9.1381  		/* Disable itm.  */
  9.1382 -		PSCBX(vcpu,domain_itm) = 0;
  9.1383 -	}
  9.1384 -	else vcpu_set_next_timer(vcpu);
  9.1385 -	return (IA64_NO_FAULT);
  9.1386 +		PSCBX(vcpu, domain_itm) = 0;
  9.1387 +	} else
  9.1388 +		vcpu_set_next_timer(vcpu);
  9.1389 +	return IA64_NO_FAULT;
  9.1390  }
  9.1391  
  9.1392 -IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
  9.1393 +IA64FAULT vcpu_set_pmv(VCPU * vcpu, u64 val)
  9.1394  {
  9.1395 -	if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
  9.1396 -	PSCB(vcpu,pmv) = val;
  9.1397 -	return (IA64_NO_FAULT);
  9.1398 +	if (val & 0xef00)	/* reserved fields */
  9.1399 +		return IA64_RSVDREG_FAULT;
  9.1400 +	PSCB(vcpu, pmv) = val;
  9.1401 +	return IA64_NO_FAULT;
  9.1402  }
  9.1403  
  9.1404 -IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
  9.1405 +IA64FAULT vcpu_set_cmcv(VCPU * vcpu, u64 val)
  9.1406  {
  9.1407 -	if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
  9.1408 -	PSCB(vcpu,cmcv) = val;
  9.1409 -	return (IA64_NO_FAULT);
  9.1410 +	if (val & 0xef00)	/* reserved fields */
  9.1411 +		return IA64_RSVDREG_FAULT;
  9.1412 +	PSCB(vcpu, cmcv) = val;
  9.1413 +	return IA64_NO_FAULT;
  9.1414  }
  9.1415  
  9.1416  /**************************************************************************
  9.1417   VCPU temporary register access routines
  9.1418  **************************************************************************/
  9.1419 -UINT64 vcpu_get_tmp(VCPU *vcpu, UINT64 index)
  9.1420 +u64 vcpu_get_tmp(VCPU * vcpu, u64 index)
  9.1421  {
  9.1422 -	if (index > 7) return 0;
  9.1423 -	return PSCB(vcpu,tmp[index]);
  9.1424 +	if (index > 7)
  9.1425 +		return 0;
  9.1426 +	return PSCB(vcpu, tmp[index]);
  9.1427  }
  9.1428  
  9.1429 -void vcpu_set_tmp(VCPU *vcpu, UINT64 index, UINT64 val)
  9.1430 +void vcpu_set_tmp(VCPU * vcpu, u64 index, u64 val)
  9.1431  {
  9.1432 -	if (index <= 7) PSCB(vcpu,tmp[index]) = val;
  9.1433 +	if (index <= 7)
  9.1434 +		PSCB(vcpu, tmp[index]) = val;
  9.1435  }
  9.1436  
  9.1437  /**************************************************************************
  9.1438  Interval timer routines
  9.1439  **************************************************************************/
  9.1440  
  9.1441 -BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
  9.1442 +BOOLEAN vcpu_timer_disabled(VCPU * vcpu)
  9.1443  {
  9.1444 -	UINT64 itv = PSCB(vcpu,itv);
  9.1445 -	return(!itv || !!(itv & 0x10000));
  9.1446 +	u64 itv = PSCB(vcpu, itv);
  9.1447 +	return (!itv || !!(itv & 0x10000));
  9.1448 +}
  9.1449 +
  9.1450 +BOOLEAN vcpu_timer_inservice(VCPU * vcpu)
  9.1451 +{
  9.1452 +	u64 itv = PSCB(vcpu, itv);
  9.1453 +	return test_bit(itv, PSCBX(vcpu, insvc));
  9.1454  }
  9.1455  
  9.1456 -BOOLEAN vcpu_timer_inservice(VCPU *vcpu)
  9.1457 +BOOLEAN vcpu_timer_expired(VCPU * vcpu)
  9.1458  {
  9.1459 -	UINT64 itv = PSCB(vcpu,itv);
  9.1460 -	return (test_bit(itv, PSCBX(vcpu,insvc)));
  9.1461 -}
  9.1462 -
  9.1463 -BOOLEAN vcpu_timer_expired(VCPU *vcpu)
  9.1464 -{
  9.1465 -	unsigned long domain_itm = PSCBX(vcpu,domain_itm);
  9.1466 +	unsigned long domain_itm = PSCBX(vcpu, domain_itm);
  9.1467  	unsigned long now = ia64_get_itc();
  9.1468  
  9.1469 -	if (!domain_itm) return FALSE;
  9.1470 -	if (now < domain_itm) return FALSE;
  9.1471 -	if (vcpu_timer_disabled(vcpu)) return FALSE;
  9.1472 +	if (!domain_itm)
  9.1473 +		return FALSE;
  9.1474 +	if (now < domain_itm)
  9.1475 +		return FALSE;
  9.1476 +	if (vcpu_timer_disabled(vcpu))
  9.1477 +		return FALSE;
  9.1478  	return TRUE;
  9.1479  }
  9.1480  
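
The EOI path reworked above (vcpu_set_eoi) retires the highest-numbered vector currently in service: it scans the four insvc words from the top, takes the most significant set bit of the first non-zero word, and clears it. A minimal standalone sketch of that scan, assuming an LP64 target and using __builtin_clzl in place of ia64_fls; the helper name is illustrative, not from this changeset:

	/* Illustrative sketch only -- not code from this changeset. */
	static int highest_inservice_vector(const u64 insvc[4])
	{
		int i;

		for (i = 3; i >= 0; i--)
			if (insvc[i])
				/* 0-based index of the most significant set bit,
				 * plus 64 per word above it: the vector number. */
				return (63 - __builtin_clzl(insvc[i])) + i * 64;
		return -1;	/* nothing in service */
	}

After clearing that bit, vcpu_set_eoi() re-runs vcpu_check_pending_interrupts(), since retiring an in-service vector can unmask a lower-priority pending one.
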
  9.1481 @@ -1049,25 +1154,26 @@ void vcpu_safe_set_itm(unsigned long val
  9.1482  {
  9.1483  	unsigned long epsilon = 100;
  9.1484  	unsigned long flags;
  9.1485 -	UINT64 now = ia64_get_itc();
  9.1486 +	u64 now = ia64_get_itc();
  9.1487  
  9.1488  	local_irq_save(flags);
  9.1489  	while (1) {
  9.1490  //printf("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
  9.1491  		ia64_set_itm(val);
  9.1492 -		if (val > (now = ia64_get_itc())) break;
  9.1493 +		if (val > (now = ia64_get_itc()))
  9.1494 +			break;
  9.1495  		val = now + epsilon;
  9.1496  		epsilon <<= 1;
  9.1497  	}
  9.1498  	local_irq_restore(flags);
  9.1499  }
  9.1500  
  9.1501 -void vcpu_set_next_timer(VCPU *vcpu)
  9.1502 +void vcpu_set_next_timer(VCPU * vcpu)
  9.1503  {
  9.1504 -	UINT64 d = PSCBX(vcpu,domain_itm);
  9.1505 -	//UINT64 s = PSCBX(vcpu,xen_itm);
  9.1506 -	UINT64 s = local_cpu_data->itm_next;
  9.1507 -	UINT64 now = ia64_get_itc();
  9.1508 +	u64 d = PSCBX(vcpu, domain_itm);
  9.1509 +	//u64 s = PSCBX(vcpu,xen_itm);
  9.1510 +	u64 s = local_cpu_data->itm_next;
  9.1511 +	u64 now = ia64_get_itc();
  9.1512  
  9.1513  	/* gloss over the wraparound problem for now... we know it exists
  9.1514  	 * but it doesn't matter right now */
  9.1515 @@ -1081,25 +1187,24 @@ void vcpu_set_next_timer(VCPU *vcpu)
  9.1516  	if (d && (d > now) && (d < s)) {
  9.1517  		vcpu_safe_set_itm(d);
  9.1518  		//using_domain_as_itm++;
  9.1519 -	}
  9.1520 -	else {
  9.1521 +	} else {
  9.1522  		vcpu_safe_set_itm(s);
  9.1523  		//using_xen_as_itm++;
  9.1524  	}
  9.1525  }
  9.1526  
  9.1527 -IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
  9.1528 +IA64FAULT vcpu_set_itm(VCPU * vcpu, u64 val)
  9.1529  {
  9.1530  	//UINT now = ia64_get_itc();
  9.1531  
  9.1532  	//if (val < now) val = now + 1000;
  9.1533  //printf("*** vcpu_set_itm: called with %lx\n",val);
  9.1534 -	PSCBX(vcpu,domain_itm) = val;
  9.1535 +	PSCBX(vcpu, domain_itm) = val;
  9.1536  	vcpu_set_next_timer(vcpu);
  9.1537 -	return (IA64_NO_FAULT);
  9.1538 +	return IA64_NO_FAULT;
  9.1539  }
  9.1540  
  9.1541 -IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
  9.1542 +IA64FAULT vcpu_set_itc(VCPU * vcpu, u64 val)
  9.1543  {
  9.1544  #define DISALLOW_SETTING_ITC_FOR_NOW
  9.1545  #ifdef DISALLOW_SETTING_ITC_FOR_NOW
  9.1546 @@ -1110,58 +1215,59 @@ IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT6
  9.1547  		did_print = 1;
  9.1548  	}
  9.1549  #else
  9.1550 -	UINT64 oldnow = ia64_get_itc();
  9.1551 -	UINT64 olditm = PSCBX(vcpu,domain_itm);
  9.1552 +	u64 oldnow = ia64_get_itc();
  9.1553 +	u64 olditm = PSCBX(vcpu, domain_itm);
  9.1554  	unsigned long d = olditm - oldnow;
  9.1555  	unsigned long x = local_cpu_data->itm_next - oldnow;
  9.1556  
  9.1557 -	UINT64 newnow = val, min_delta;
  9.1558 +	u64 newnow = val, min_delta;
  9.1559  
  9.1560  	local_irq_disable();
  9.1561  	if (olditm) {
  9.1562 -printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
  9.1563 -		PSCBX(vcpu,domain_itm) = newnow + d;
  9.1564 +		printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n", val,
  9.1565 +		       newnow + d);
  9.1566 +		PSCBX(vcpu, domain_itm) = newnow + d;
  9.1567  	}
  9.1568  	local_cpu_data->itm_next = newnow + x;
  9.1569 -	d = PSCBX(vcpu,domain_itm);
  9.1570 +	d = PSCBX(vcpu, domain_itm);
  9.1571  	x = local_cpu_data->itm_next;
  9.1572  
  9.1573  	ia64_set_itc(newnow);
  9.1574  	if (d && (d > newnow) && (d < x)) {
  9.1575  		vcpu_safe_set_itm(d);
  9.1576  		//using_domain_as_itm++;
  9.1577 -	}
  9.1578 -	else {
  9.1579 +	} else {
  9.1580  		vcpu_safe_set_itm(x);
  9.1581  		//using_xen_as_itm++;
  9.1582  	}
  9.1583  	local_irq_enable();
  9.1584  #endif
  9.1585 -	return (IA64_NO_FAULT);
  9.1586 +	return IA64_NO_FAULT;
  9.1587  }
  9.1588  
  9.1589 -IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
  9.1590 +IA64FAULT vcpu_get_itm(VCPU * vcpu, u64 * pval)
  9.1591  {
  9.1592  	//FIXME: Implement this
  9.1593  	printf("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
  9.1594 -	return (IA64_NO_FAULT);
  9.1595 -	//return (IA64_ILLOP_FAULT);
  9.1596 +	return IA64_NO_FAULT;
  9.1597 +	//return IA64_ILLOP_FAULT;
  9.1598  }
  9.1599  
  9.1600 -IA64FAULT vcpu_get_itc(VCPU *vcpu, UINT64 *pval)
  9.1601 +IA64FAULT vcpu_get_itc(VCPU * vcpu, u64 * pval)
  9.1602  {
  9.1603  	//TODO: Implement this
  9.1604  	printf("vcpu_get_itc: Getting ar.itc is unsupported\n");
  9.1605 -	return (IA64_ILLOP_FAULT);
  9.1606 +	return IA64_ILLOP_FAULT;
  9.1607  }
  9.1608  
  9.1609 -void vcpu_pend_timer(VCPU *vcpu)
  9.1610 +void vcpu_pend_timer(VCPU * vcpu)
  9.1611  {
  9.1612 -	UINT64 itv = PSCB(vcpu,itv) & 0xff;
  9.1613 +	u64 itv = PSCB(vcpu, itv) & 0xff;
  9.1614  
  9.1615 -	if (vcpu_timer_disabled(vcpu)) return;
  9.1616 +	if (vcpu_timer_disabled(vcpu))
  9.1617 +		return;
  9.1618  	//if (vcpu_timer_inservice(vcpu)) return;
  9.1619 -	if (PSCBX(vcpu,domain_itm_last) == PSCBX(vcpu,domain_itm)) {
  9.1620 +	if (PSCBX(vcpu, domain_itm_last) == PSCBX(vcpu, domain_itm)) {
  9.1621  		// already delivered an interrupt for this so
  9.1622  		// don't deliver another
  9.1623  		return;
  9.1624 @@ -1179,13 +1285,15 @@ void vcpu_pend_timer(VCPU *vcpu)
  9.1625  }
  9.1626  
  9.1627  // returns true if ready to deliver a timer interrupt too early
  9.1628 -UINT64 vcpu_timer_pending_early(VCPU *vcpu)
  9.1629 +u64 vcpu_timer_pending_early(VCPU * vcpu)
  9.1630  {
  9.1631 -	UINT64 now = ia64_get_itc();
  9.1632 -	UINT64 itm = PSCBX(vcpu,domain_itm);
  9.1633 +	u64 now = ia64_get_itc();
  9.1634 +	u64 itm = PSCBX(vcpu, domain_itm);
  9.1635  
  9.1636 -	if (vcpu_timer_disabled(vcpu)) return 0;
  9.1637 -	if (!itm) return 0;
  9.1638 +	if (vcpu_timer_disabled(vcpu))
  9.1639 +		return 0;
  9.1640 +	if (!itm)
  9.1641 +		return 0;
  9.1642  	return (vcpu_deliverable_timer(vcpu) && (now < itm));
  9.1643  }
  9.1644  
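
The timer plumbing above always programs the physical itm through vcpu_safe_set_itm(), and vcpu_set_next_timer() picks between the guest's deadline and Xen's own: use the domain's itm only when it is set, still in the future, and earlier than Xen's next tick. A minimal sketch of that selection rule (helper and parameter names are illustrative only):

	/* Illustrative sketch only -- not code from this changeset. */
	static inline u64 pick_next_itm(u64 domain_itm, u64 xen_itm, u64 now)
	{
		/* Prefer the guest's deadline only if honouring it cannot
		 * delay Xen's own tick; otherwise use Xen's deadline. */
		if (domain_itm && domain_itm > now && domain_itm < xen_itm)
			return domain_itm;
		return xen_itm;
	}

vcpu_set_itm() reaches this choice via vcpu_set_next_timer(); the (currently disabled) vcpu_set_itc() path applies the same rule inline.
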
  9.1645 @@ -1193,120 +1301,129 @@ UINT64 vcpu_timer_pending_early(VCPU *vc
  9.1646  Privileged operation emulation routines
  9.1647  **************************************************************************/
  9.1648  
  9.1649 -static void
  9.1650 -vcpu_force_tlb_miss(VCPU* vcpu, UINT64 ifa)
  9.1651 +static void vcpu_force_tlb_miss(VCPU * vcpu, u64 ifa)
  9.1652  {
  9.1653  	PSCB(vcpu, ifa) = ifa;
  9.1654  	PSCB(vcpu, itir) = vcpu_get_itir_on_fault(vcpu, ifa);
  9.1655  	vcpu_thash(current, ifa, &PSCB(current, iha));
  9.1656  }
  9.1657  
  9.1658 -IA64FAULT vcpu_force_inst_miss(VCPU *vcpu, UINT64 ifa)
  9.1659 +IA64FAULT vcpu_force_inst_miss(VCPU * vcpu, u64 ifa)
  9.1660  {
  9.1661  	vcpu_force_tlb_miss(vcpu, ifa);
  9.1662 -	return (vcpu_get_rr_ve(vcpu, ifa)? IA64_INST_TLB_VECTOR: IA64_ALT_INST_TLB_VECTOR);
  9.1663 +	return vcpu_get_rr_ve(vcpu, ifa) ? IA64_INST_TLB_VECTOR :
  9.1664 +		IA64_ALT_INST_TLB_VECTOR;
  9.1665  }
  9.1666  
  9.1667 -IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
  9.1668 +IA64FAULT vcpu_force_data_miss(VCPU * vcpu, u64 ifa)
  9.1669  {
  9.1670  	vcpu_force_tlb_miss(vcpu, ifa);
  9.1671 -	return (vcpu_get_rr_ve(vcpu, ifa)? IA64_DATA_TLB_VECTOR: IA64_ALT_DATA_TLB_VECTOR);
  9.1672 +	return vcpu_get_rr_ve(vcpu, ifa) ? IA64_DATA_TLB_VECTOR :
  9.1673 +		IA64_ALT_DATA_TLB_VECTOR;
  9.1674  }
  9.1675  
  9.1676 -IA64FAULT vcpu_rfi(VCPU *vcpu)
  9.1677 +IA64FAULT vcpu_rfi(VCPU * vcpu)
  9.1678  {
  9.1679  	// TODO: Only allowed for current vcpu
  9.1680  	PSR psr;
  9.1681 -	UINT64 int_enable, regspsr = 0;
  9.1682 -	UINT64 ifs;
  9.1683 +	u64 int_enable, regspsr = 0;
  9.1684 +	u64 ifs;
  9.1685  	REGS *regs = vcpu_regs(vcpu);
  9.1686  	extern void dorfirfi(void);
  9.1687  
  9.1688 -	psr.i64 = PSCB(vcpu,ipsr);
  9.1689 -	if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
  9.1690 +	psr.i64 = PSCB(vcpu, ipsr);
  9.1691 +	if (psr.ia64_psr.cpl < 3)
  9.1692 +		psr.ia64_psr.cpl = 2;
  9.1693  	int_enable = psr.ia64_psr.i;
  9.1694 -	if (psr.ia64_psr.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
  9.1695 -	if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
  9.1696 -	else vcpu_set_metaphysical_mode(vcpu,TRUE);
  9.1697 -	psr.ia64_psr.ic = 1; psr.ia64_psr.i = 1;
  9.1698 -	psr.ia64_psr.dt = 1; psr.ia64_psr.rt = 1; psr.ia64_psr.it = 1;
  9.1699 +	if (psr.ia64_psr.ic)
  9.1700 +		PSCB(vcpu, interrupt_collection_enabled) = 1;
  9.1701 +	if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it)
  9.1702 +		vcpu_set_metaphysical_mode(vcpu, FALSE);
  9.1703 +	else
  9.1704 +		vcpu_set_metaphysical_mode(vcpu, TRUE);
  9.1705 +	psr.ia64_psr.ic = 1;
  9.1706 +	psr.ia64_psr.i = 1;
  9.1707 +	psr.ia64_psr.dt = 1;
  9.1708 +	psr.ia64_psr.rt = 1;
  9.1709 +	psr.ia64_psr.it = 1;
  9.1710  	psr.ia64_psr.bn = 1;
  9.1711  	//psr.pk = 1;  // checking pkeys shouldn't be a problem but seems broken
  9.1712  	if (psr.ia64_psr.be) {
  9.1713  		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
  9.1714 -		return (IA64_ILLOP_FAULT);
  9.1715 +		return IA64_ILLOP_FAULT;
  9.1716  	}
  9.1717 -	PSCB(vcpu,incomplete_regframe) = 0; // is this necessary?
  9.1718 -	ifs = PSCB(vcpu,ifs);
  9.1719 +	PSCB(vcpu, incomplete_regframe) = 0;	// is this necessary?
  9.1720 +	ifs = PSCB(vcpu, ifs);
  9.1721  	//if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
  9.1722  	//if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
  9.1723  	if (ifs & regs->cr_ifs & 0x8000000000000000L) {
  9.1724  		// TODO: validate PSCB(vcpu,iip)
  9.1725  		// TODO: PSCB(vcpu,ipsr) = psr;
  9.1726 -		PSCB(vcpu,ipsr) = psr.i64;
  9.1727 +		PSCB(vcpu, ipsr) = psr.i64;
  9.1728  		// now set up the trampoline
  9.1729  		regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
  9.1730 -		__asm__ __volatile ("mov %0=psr;;":"=r"(regspsr)::"memory");
  9.1731 -		regs->cr_ipsr = regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
  9.1732 +		__asm__ __volatile("mov %0=psr;;":"=r"(regspsr)::"memory");
  9.1733 +		regs->cr_ipsr =
  9.1734 +		    regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
  9.1735 +	} else {
  9.1736 +		regs->cr_ipsr = psr.i64;
  9.1737 +		regs->cr_iip = PSCB(vcpu, iip);
  9.1738  	}
  9.1739 -	else {
  9.1740 -		regs->cr_ipsr = psr.i64;
  9.1741 -		regs->cr_iip = PSCB(vcpu,iip);
  9.1742 -	}
  9.1743 -	PSCB(vcpu,interrupt_collection_enabled) = 1;
  9.1744 +	PSCB(vcpu, interrupt_collection_enabled) = 1;
  9.1745  	vcpu_bsw1(vcpu);
  9.1746  	vcpu->vcpu_info->evtchn_upcall_mask = !int_enable;
  9.1747 -	return (IA64_NO_FAULT);
  9.1748 +	return IA64_NO_FAULT;
  9.1749  }
  9.1750  
  9.1751 -IA64FAULT vcpu_cover(VCPU *vcpu)
  9.1752 +IA64FAULT vcpu_cover(VCPU * vcpu)
  9.1753  {
  9.1754  	// TODO: Only allowed for current vcpu
  9.1755  	REGS *regs = vcpu_regs(vcpu);
  9.1756  
  9.1757 -	if (!PSCB(vcpu,interrupt_collection_enabled)) {
  9.1758 -		if (!PSCB(vcpu,incomplete_regframe))
  9.1759 -			PSCB(vcpu,ifs) = regs->cr_ifs;
  9.1760 -		else PSCB(vcpu,incomplete_regframe) = 0;
  9.1761 +	if (!PSCB(vcpu, interrupt_collection_enabled)) {
  9.1762 +		if (!PSCB(vcpu, incomplete_regframe))
  9.1763 +			PSCB(vcpu, ifs) = regs->cr_ifs;
  9.1764 +		else
  9.1765 +			PSCB(vcpu, incomplete_regframe) = 0;
  9.1766  	}
  9.1767  	regs->cr_ifs = 0;
  9.1768 -	return (IA64_NO_FAULT);
  9.1769 +	return IA64_NO_FAULT;
  9.1770  }
  9.1771  
  9.1772 -IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
  9.1773 +IA64FAULT vcpu_thash(VCPU * vcpu, u64 vadr, u64 * pval)
  9.1774  {
  9.1775 -	UINT64 pta = PSCB(vcpu,pta);
  9.1776 -	UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
  9.1777 -	UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
  9.1778 -	UINT64 Mask = (1L << pta_sz) - 1;
  9.1779 -	UINT64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
  9.1780 -	UINT64 compMask_60_15 = ~Mask_60_15;
  9.1781 -	UINT64 rr_ps = vcpu_get_rr_ps(vcpu,vadr);
  9.1782 -	UINT64 VHPT_offset = (vadr >> rr_ps) << 3;
  9.1783 -	UINT64 VHPT_addr1 = vadr & 0xe000000000000000L;
  9.1784 -	UINT64 VHPT_addr2a =
  9.1785 -		((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
  9.1786 -	UINT64 VHPT_addr2b =
  9.1787 -		((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
  9.1788 -	UINT64 VHPT_addr3 = VHPT_offset & 0x7fff;
  9.1789 -	UINT64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
  9.1790 -			VHPT_addr3;
  9.1791 +	u64 pta = PSCB(vcpu, pta);
  9.1792 +	u64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
  9.1793 +	u64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT) - 1);
  9.1794 +	u64 Mask = (1L << pta_sz) - 1;
  9.1795 +	u64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
  9.1796 +	u64 compMask_60_15 = ~Mask_60_15;
  9.1797 +	u64 rr_ps = vcpu_get_rr_ps(vcpu, vadr);
  9.1798 +	u64 VHPT_offset = (vadr >> rr_ps) << 3;
  9.1799 +	u64 VHPT_addr1 = vadr & 0xe000000000000000L;
  9.1800 +	u64 VHPT_addr2a =
  9.1801 +	    ((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
  9.1802 +	u64 VHPT_addr2b =
  9.1803 +	    ((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
  9.1804 +	u64 VHPT_addr3 = VHPT_offset & 0x7fff;
  9.1805 +	u64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
  9.1806 +	    VHPT_addr3;
  9.1807  
  9.1808  //verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
  9.1809  	*pval = VHPT_addr;
  9.1810 -	return (IA64_NO_FAULT);
  9.1811 +	return IA64_NO_FAULT;
  9.1812  }
  9.1813  
  9.1814 -IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
  9.1815 +IA64FAULT vcpu_ttag(VCPU * vcpu, u64 vadr, u64 * padr)
  9.1816  {
  9.1817  	printf("vcpu_ttag: ttag instruction unsupported\n");
  9.1818 -	return (IA64_ILLOP_FAULT);
  9.1819 +	return IA64_ILLOP_FAULT;
  9.1820  }
  9.1821  
  9.1822 -int warn_region0_address = 0; // FIXME later: tie to a boot parameter?
  9.1823 +int warn_region0_address = 0;	// FIXME later: tie to a boot parameter?
  9.1824  
  9.1825  /* Return TRUE iff [b1,e1] and [b2,e2] partially or fully overlaps.  */
  9.1826 -static inline int range_overlap (u64 b1, u64 e1, u64 b2, u64 e2)
  9.1827 +static inline int range_overlap(u64 b1, u64 e1, u64 b2, u64 e2)
  9.1828  {
  9.1829  	return (b1 <= e2) && (e1 >= b2);
  9.1830  }
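
vcpu_thash() in the hunk above builds the short-format VHPT entry address from cr.pta and the faulting address: bits 63:61 come from the address, bits 14:0 from the 8-byte entry offset, and bits 60:15 are blended from PTA.base and the offset according to the configured table size. A standalone restatement of the same computation, assuming the architectural PTA layout (size in bits 7:2, base in bits 63:15); the helper name is illustrative:

	/* Illustrative sketch only -- not code from this changeset. */
	static u64 short_format_vhpt_hash(u64 pta, u64 vadr, u64 rr_ps)
	{
		u64 pta_sz   = (pta >> 2) & 0x3f;		/* PTA.size (assumed bits 7:2)   */
		u64 pta_base = pta & ~((1UL << 15) - 1);	/* PTA.base (assumed bits 63:15) */
		u64 mask     = (1UL << pta_sz) - 1;		/* table span in bytes, minus 1  */
		u64 off      = (vadr >> rr_ps) << 3;		/* 8-byte entry offset           */
		u64 mid_mask = (mask >> 15) & 0x3fffffffffffUL;	/* table-covered part of 60:15   */
		u64 mid      = ((pta_base >> 15) & 0x3fffffffffffUL & ~mid_mask) |
			       ((off >> 15) & mid_mask);

		return (vadr & 0xe000000000000000UL) | (mid << 15) | (off & 0x7fff);
	}
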
  9.1831 @@ -1314,7 +1431,7 @@ static inline int range_overlap (u64 b1,
  9.1832  /* Crash domain if [base, base + page_size] and Xen virtual space overlaps.
  9.1833     Note: LSBs of base inside page_size are ignored.  */
  9.1834  static inline void
  9.1835 -check_xen_space_overlap (const char *func, u64 base, u64 page_size)
  9.1836 +check_xen_space_overlap(const char *func, u64 base, u64 page_size)
  9.1837  {
  9.1838  	/* Overlaps can occur only in region 7.
  9.1839  	   (This is an optimization to bypass all the checks).  */
  9.1840 @@ -1325,43 +1442,42 @@ check_xen_space_overlap (const char *fun
  9.1841  	base &= ~(page_size - 1);
  9.1842  
  9.1843  	/* FIXME: ideally an MCA should be generated...  */
  9.1844 -	if (range_overlap (HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END,
  9.1845 -	                   base, base + page_size)
  9.1846 +	if (range_overlap(HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END,
  9.1847 +			  base, base + page_size)
  9.1848  	    || range_overlap(current->domain->arch.shared_info_va,
  9.1849 -	                     current->domain->arch.shared_info_va 
  9.1850 -	                     + XSI_SIZE + XMAPPEDREGS_SIZE,
  9.1851 -	                     base, base + page_size))
  9.1852 -		panic_domain (NULL, "%s on Xen virtual space (%lx)\n",
  9.1853 -			      func, base);
  9.1854 +			     current->domain->arch.shared_info_va
  9.1855 +			     + XSI_SIZE + XMAPPEDREGS_SIZE,
  9.1856 +			     base, base + page_size))
  9.1857 +		panic_domain(NULL, "%s on Xen virtual space (%lx)\n",
  9.1858 +			     func, base);
  9.1859  }
  9.1860  
  9.1861  // FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
  9.1862 -static inline int vcpu_match_tr_entry_no_p(TR_ENTRY *trp, UINT64 ifa, UINT64 rid)
  9.1863 +static inline int vcpu_match_tr_entry_no_p(TR_ENTRY * trp, u64 ifa,
  9.1864 +                                           u64 rid)
  9.1865  {
  9.1866 -	return trp->rid == rid 
  9.1867 -		&& ifa >= trp->vadr
  9.1868 -		&& ifa <= (trp->vadr + (1L << trp->ps) - 1);
  9.1869 +	return trp->rid == rid
  9.1870 +	    && ifa >= trp->vadr && ifa <= (trp->vadr + (1L << trp->ps) - 1);
  9.1871  }
  9.1872  
  9.1873 -static inline int vcpu_match_tr_entry(TR_ENTRY *trp, UINT64 ifa, UINT64 rid)
  9.1874 +static inline int vcpu_match_tr_entry(TR_ENTRY * trp, u64 ifa, u64 rid)
  9.1875  {
  9.1876  	return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
  9.1877  }
  9.1878  
  9.1879  static inline int
  9.1880 -vcpu_match_tr_entry_range(TR_ENTRY *trp, UINT64 rid, u64 b, u64 e)
  9.1881 +vcpu_match_tr_entry_range(TR_ENTRY * trp, u64 rid, u64 b, u64 e)
  9.1882  {
  9.1883  	return trp->rid == rid
  9.1884 -		&& trp->pte.p
  9.1885 -		&& range_overlap (b, e,
  9.1886 -				  trp->vadr, trp->vadr + (1L << trp->ps) - 1);
  9.1887 +	    && trp->pte.p
  9.1888 +	    && range_overlap(b, e, trp->vadr, trp->vadr + (1L << trp->ps) - 1);
  9.1889  
  9.1890  }
  9.1891  
  9.1892 -static TR_ENTRY*
  9.1893 -vcpu_tr_lookup(VCPU* vcpu, unsigned long va, UINT64 rid, BOOLEAN is_data)
  9.1894 +static TR_ENTRY *vcpu_tr_lookup(VCPU * vcpu, unsigned long va, u64 rid,
  9.1895 +                                BOOLEAN is_data)
  9.1896  {
  9.1897 -	unsigned char* regions;
  9.1898 +	unsigned char *regions;
  9.1899  	TR_ENTRY *trp;
  9.1900  	int tr_max;
  9.1901  	int i;
  9.1902 @@ -1370,12 +1486,12 @@ vcpu_tr_lookup(VCPU* vcpu, unsigned long
  9.1903  		// data
  9.1904  		regions = &vcpu->arch.dtr_regions;
  9.1905  		trp = vcpu->arch.dtrs;
  9.1906 -		tr_max = sizeof(vcpu->arch.dtrs)/sizeof(vcpu->arch.dtrs[0]);
  9.1907 +		tr_max = sizeof(vcpu->arch.dtrs) / sizeof(vcpu->arch.dtrs[0]);
  9.1908  	} else {
  9.1909  		// instruction
  9.1910  		regions = &vcpu->arch.itr_regions;
  9.1911  		trp = vcpu->arch.itrs;
  9.1912 -		tr_max = sizeof(vcpu->arch.itrs)/sizeof(vcpu->arch.itrs[0]);
  9.1913 +		tr_max = sizeof(vcpu->arch.itrs) / sizeof(vcpu->arch.itrs[0]);
  9.1914  	}
  9.1915  
  9.1916  	if (!vcpu_quick_region_check(*regions, va)) {
  9.1917 @@ -1393,13 +1509,14 @@ vcpu_tr_lookup(VCPU* vcpu, unsigned long
  9.1918  // 0: failure
  9.1919  // 1: success
  9.1920  int
  9.1921 -vcpu_get_domain_bundle(VCPU* vcpu, REGS* regs, UINT64 gip, IA64_BUNDLE* bundle)
  9.1922 +vcpu_get_domain_bundle(VCPU * vcpu, REGS * regs, u64 gip,
  9.1923 +                       IA64_BUNDLE * bundle)
  9.1924  {
  9.1925 -	UINT64 gpip;// guest pseudo phyiscal ip
  9.1926 +	u64 gpip;		// guest pseudo physical ip

  9.1927  	unsigned long vaddr;
  9.1928 -	struct page_info* page;
  9.1929 +	struct page_info *page;
  9.1930  
  9.1931 -again:
  9.1932 + again:
  9.1933  #if 0
  9.1934  	// Currently xen doesn't track psr.it bits.
  9.1935  	// it assumes always psr.it = 1.
  9.1936 @@ -1412,7 +1529,7 @@ again:
  9.1937  		unsigned long rr = PSCB(vcpu, rrs)[region];
  9.1938  		unsigned long rid = rr & RR_RID_MASK;
  9.1939  		BOOLEAN swap_rr0;
  9.1940 -		TR_ENTRY* trp;
  9.1941 +		TR_ENTRY *trp;
  9.1942  
  9.1943  		// vcpu->arch.{i, d}tlb are volatile,
  9.1944  		// copy its value to the variable, tr, before use.
  9.1945 @@ -1427,7 +1544,8 @@ again:
  9.1946  		// Last itc.i value is cached to PSCBX(vcpu, itlb).
  9.1947  		tr = PSCBX(vcpu, itlb);
  9.1948  		if (vcpu_match_tr_entry(&tr, gip, rid)) {
  9.1949 -			//DPRINTK("%s gip 0x%lx gpip 0x%lx\n", __func__, gip, gpip);
  9.1950 +			//DPRINTK("%s gip 0x%lx gpip 0x%lx\n", __func__,
  9.1951 +			//	gip, gpip);
  9.1952  			goto found;
  9.1953  		}
  9.1954  		trp = vcpu_tr_lookup(vcpu, gip, rid, 1);
  9.1955 @@ -1457,43 +1575,43 @@ again:
  9.1956  			return 0;
  9.1957  		}
  9.1958  		return 1;
  9.1959 -        
  9.1960 +
  9.1961  	found:
  9.1962  		gpip = ((tr.pte.ppn >> (tr.ps - 12)) << tr.ps) |
  9.1963  			(gip & ((1 << tr.ps) - 1));
  9.1964  	}
  9.1965 -	
  9.1966 +
  9.1967  	vaddr = (unsigned long)domain_mpa_to_imva(vcpu->domain, gpip);
  9.1968  	page = virt_to_page(vaddr);
  9.1969  	if (get_page(page, vcpu->domain) == 0) {
  9.1970  		if (page_get_owner(page) != vcpu->domain) {
  9.1971  			// This page might be a page granted by another
  9.1972  			// domain.
  9.1973 -			panic_domain(regs,
  9.1974 -				     "domain tries to execute foreign domain "
  9.1975 -				     "page which might be mapped by grant "
  9.1976 -				     "table.\n");
  9.1977 +			panic_domain(regs, "domain tries to execute foreign "
  9.1978 +				     "domain page which might be mapped by "
  9.1979 +				     "grant table.\n");
  9.1980  		}
  9.1981  		goto again;
  9.1982  	}
  9.1983 -	*bundle = *((IA64_BUNDLE*)vaddr);
  9.1984 +	*bundle = *((IA64_BUNDLE *) vaddr);
  9.1985  	put_page(page);
  9.1986  	return 1;
  9.1987  }
  9.1988  
  9.1989 -IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
  9.1990 +IA64FAULT vcpu_translate(VCPU * vcpu, u64 address, BOOLEAN is_data,
  9.1991 +			 u64 * pteval, u64 * itir, u64 * iha)
  9.1992  {
  9.1993  	unsigned long region = address >> 61;
  9.1994  	unsigned long pta, rid, rr;
  9.1995  	union pte_flags pte;
  9.1996  	TR_ENTRY *trp;
  9.1997  
  9.1998 -	if (PSCB(vcpu,metaphysical_mode) && !(!is_data && region)) {
  9.1999 +	if (PSCB(vcpu, metaphysical_mode) && !(!is_data && region)) {
  9.2000  		// dom0 may generate an uncacheable physical address (msb=1)
  9.2001  		if (region && ((region != 4) || (vcpu->domain != dom0))) {
  9.2002  // FIXME: This seems to happen even though it shouldn't.  Need to track
  9.2003  // this down, but since it has been apparently harmless, just flag it for now
  9.2004 -//			panic_domain(vcpu_regs(vcpu),
  9.2005 +//                      panic_domain(vcpu_regs(vcpu),
  9.2006  
  9.2007  			/*
  9.2008  			 * Guest may execute itc.d and rfi with psr.dt=0
  9.2009 @@ -1501,29 +1619,29 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
  9.2010  			 * At this time PSCB(vcpu,metaphysical_mode)=1,
  9.2011  			 * region=5,VMM need to handle this tlb miss as if
  9.2012  			 * PSCB(vcpu,metaphysical_mode)=0
  9.2013 -			 */           
  9.2014 -			printk("vcpu_translate: bad physical address: 0x%lx at %lx\n",
  9.2015 -			       address, vcpu_regs (vcpu)->cr_iip);
  9.2016 +			 */
  9.2017 +			printk("vcpu_translate: bad physical address: 0x%lx "
  9.2018 +			       "at %lx\n", address, vcpu_regs(vcpu)->cr_iip);
  9.2019  
  9.2020  		} else {
  9.2021 -			*pteval = (address & _PAGE_PPN_MASK) | __DIRTY_BITS |
  9.2022 -			          _PAGE_PL_2 | _PAGE_AR_RWX;
  9.2023 +			*pteval = (address & _PAGE_PPN_MASK) |
  9.2024 +				__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX;
  9.2025  			*itir = PAGE_SHIFT << 2;
  9.2026  			perfc_incrc(phys_translate);
  9.2027  			return IA64_NO_FAULT;
  9.2028  		}
  9.2029 -	}
  9.2030 -	else if (!region && warn_region0_address) {
  9.2031 +	} else if (!region && warn_region0_address) {
  9.2032  		REGS *regs = vcpu_regs(vcpu);
  9.2033 -		unsigned long viip = PSCB(vcpu,iip);
  9.2034 -		unsigned long vipsr = PSCB(vcpu,ipsr);
  9.2035 +		unsigned long viip = PSCB(vcpu, iip);
  9.2036 +		unsigned long vipsr = PSCB(vcpu, ipsr);
  9.2037  		unsigned long iip = regs->cr_iip;
  9.2038  		unsigned long ipsr = regs->cr_ipsr;
  9.2039 -		printk("vcpu_translate: bad address 0x%lx, viip=0x%lx, vipsr=0x%lx, iip=0x%lx, ipsr=0x%lx continuing\n",
  9.2040 -			address, viip, vipsr, iip, ipsr);
  9.2041 +		printk("vcpu_translate: bad address 0x%lx, viip=0x%lx, "
  9.2042 +		       "vipsr=0x%lx, iip=0x%lx, ipsr=0x%lx continuing\n",
  9.2043 +		       address, viip, vipsr, iip, ipsr);
  9.2044  	}
  9.2045  
  9.2046 -	rr = PSCB(vcpu,rrs)[region];
  9.2047 +	rr = PSCB(vcpu, rrs)[region];
  9.2048  	rid = rr & RR_RID_MASK;
  9.2049  	if (is_data) {
  9.2050  		trp = vcpu_tr_lookup(vcpu, address, rid, 1);
  9.2051 @@ -1535,7 +1653,7 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
  9.2052  		}
  9.2053  	}
  9.2054  	// FIXME?: check itr's for data accesses too, else bad things happen?
  9.2055 -	/* else */ {
  9.2056 +	/* else */  {
  9.2057  		trp = vcpu_tr_lookup(vcpu, address, rid, 0);
  9.2058  		if (trp != NULL) {
  9.2059  			*pteval = trp->pte.val;
  9.2060 @@ -1549,8 +1667,8 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
  9.2061  	// FIXME?: check dtlb for inst accesses too, else bad things happen?
  9.2062  	trp = &vcpu->arch.dtlb;
  9.2063  	pte = trp->pte;
  9.2064 -	if (/* is_data && */ pte.p
  9.2065 -	    && vcpu_match_tr_entry_no_p(trp,address,rid)) {
  9.2066 +	if ( /* is_data && */ pte.p
  9.2067 +	    && vcpu_match_tr_entry_no_p(trp, address, rid)) {
  9.2068  		*pteval = pte.val;
  9.2069  		*itir = trp->itir;
  9.2070  		perfc_incrc(dtlb_translate);
  9.2071 @@ -1558,10 +1676,10 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
  9.2072  	}
  9.2073  
  9.2074  	/* check guest VHPT */
  9.2075 -	pta = PSCB(vcpu,pta);
  9.2076 +	pta = PSCB(vcpu, pta);
  9.2077  	if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */
  9.2078 -		panic_domain(vcpu_regs(vcpu),"can't do long format VHPT\n");
  9.2079 -		//return (is_data ? IA64_DATA_TLB_VECTOR:IA64_INST_TLB_VECTOR);
  9.2080 +		panic_domain(vcpu_regs(vcpu), "can't do long format VHPT\n");
  9.2081 +		//return is_data ? IA64_DATA_TLB_VECTOR:IA64_INST_TLB_VECTOR;
  9.2082  	}
  9.2083  
  9.2084  	*itir = rr & (RR_RID_MASK | RR_PS_MASK);
  9.2085 @@ -1569,24 +1687,25 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
  9.2086  	// xenlinux depends on it so should document it as part of PV interface
  9.2087  	vcpu_thash(vcpu, address, iha);
  9.2088  	if (!(rr & RR_VE_MASK) || !(pta & IA64_PTA_VE))
  9.2089 -		return (is_data ? IA64_ALT_DATA_TLB_VECTOR : IA64_ALT_INST_TLB_VECTOR);
  9.2090 +		return is_data ? IA64_ALT_DATA_TLB_VECTOR :
  9.2091 +			IA64_ALT_INST_TLB_VECTOR;
  9.2092  
  9.2093  	/* avoid recursively walking (short format) VHPT */
  9.2094  	if (((address ^ pta) & ((itir_mask(pta) << 3) >> 3)) == 0)
  9.2095 -		return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
  9.2096 +		return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
  9.2097  
  9.2098 -	if (!__access_ok (*iha)
  9.2099 +	if (!__access_ok(*iha)
  9.2100  	    || __copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0)
  9.2101  		// virtual VHPT walker "missed" in TLB
  9.2102  		return IA64_VHPT_FAULT;
  9.2103  
  9.2104  	/*
  9.2105 -	* Optimisation: this VHPT walker aborts on not-present pages
  9.2106 -	* instead of inserting a not-present translation, this allows
  9.2107 -	* vectoring directly to the miss handler.
  9.2108 -	*/
  9.2109 +	 * Optimisation: this VHPT walker aborts on not-present pages
  9.2110 +	 * instead of inserting a not-present translation, this allows
  9.2111 +	 * vectoring directly to the miss handler.
  9.2112 +	 */
  9.2113  	if (!pte.p)
  9.2114 -		return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
  9.2115 +		return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
  9.2116  
  9.2117  	/* found mapping in guest VHPT! */
  9.2118  	*itir = rr & RR_PS_MASK;
  9.2119 @@ -1595,25 +1714,24 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
  9.2120  	return IA64_NO_FAULT;
  9.2121  }
  9.2122  
  9.2123 -IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
  9.2124 +IA64FAULT vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr)
  9.2125  {
  9.2126 -	UINT64 pteval, itir, mask, iha;
  9.2127 +	u64 pteval, itir, mask, iha;
  9.2128  	IA64FAULT fault;
  9.2129  
  9.2130  	fault = vcpu_translate(vcpu, vadr, TRUE, &pteval, &itir, &iha);
  9.2131 -	if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB)
  9.2132 -	{
  9.2133 +	if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
  9.2134  		mask = itir_mask(itir);
  9.2135  		*padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
  9.2136 -		return (IA64_NO_FAULT);
  9.2137 +		return IA64_NO_FAULT;
  9.2138  	}
  9.2139 -	return vcpu_force_data_miss(vcpu,vadr);
  9.2140 +	return vcpu_force_data_miss(vcpu, vadr);
  9.2141  }
  9.2142  
  9.2143 -IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
  9.2144 +IA64FAULT vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key)
  9.2145  {
  9.2146  	printf("vcpu_tak: tak instruction unsupported\n");
  9.2147 -	return (IA64_ILLOP_FAULT);
  9.2148 +	return IA64_ILLOP_FAULT;
  9.2149  	// HACK ALERT: tak does a thash for now
  9.2150  	//return vcpu_thash(vcpu,vadr,key);
  9.2151  }
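
vcpu_tpa() above composes the physical address from the pte and itir returned by vcpu_translate(): the mask derived from the translation's page size selects the frame bits from the pte and the page-offset bits from the virtual address. A worked sketch of that composition (the helper name, the assumed ITIR.ps position in bits 7:2, and the assumed _PAGE_PPN_MASK value are illustrative):

	/* Illustrative sketch only -- not code from this changeset. */
	static u64 compose_paddr(u64 pteval, u64 vadr, u64 itir)
	{
		u64 ps       = (itir >> 2) & 0x3f;	/* page size of the translation   */
		u64 mask     = ~((1UL << ps) - 1);	/* keeps bits above the page size */
		u64 ppn_mask = 0x0003fffffffff000UL;	/* assumed pte ppn field, 49:12   */

		/* frame bits from the pte, offset bits from the virtual address */
		return (pteval & ppn_mask & mask) | (vadr & ~mask);
	}
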
  9.2152 @@ -1622,84 +1740,84 @@ IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 va
  9.2153   VCPU debug breakpoint register access routines
  9.2154  **************************************************************************/
  9.2155  
  9.2156 -IA64FAULT vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
  9.2157 +IA64FAULT vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
  9.2158  {
  9.2159  	// TODO: unimplemented DBRs return a reserved register fault
  9.2160  	// TODO: Should set Logical CPU state, not just physical
  9.2161 -	ia64_set_dbr(reg,val);
  9.2162 -	return (IA64_NO_FAULT);
  9.2163 +	ia64_set_dbr(reg, val);
  9.2164 +	return IA64_NO_FAULT;
  9.2165  }
  9.2166  
  9.2167 -IA64FAULT vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
  9.2168 +IA64FAULT vcpu_set_ibr(VCPU * vcpu, u64 reg, u64 val)
  9.2169  {
  9.2170  	// TODO: unimplemented IBRs return a reserved register fault
  9.2171  	// TODO: Should set Logical CPU state, not just physical
  9.2172 -	ia64_set_ibr(reg,val);
  9.2173 -	return (IA64_NO_FAULT);
  9.2174 +	ia64_set_ibr(reg, val);
  9.2175 +	return IA64_NO_FAULT;
  9.2176  }
  9.2177  
  9.2178 -IA64FAULT vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  9.2179 +IA64FAULT vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 * pval)
  9.2180  {
  9.2181  	// TODO: unimplemented DBRs return a reserved register fault
  9.2182 -	UINT64 val = ia64_get_dbr(reg);
  9.2183 +	u64 val = ia64_get_dbr(reg);
  9.2184  	*pval = val;
  9.2185 -	return (IA64_NO_FAULT);
  9.2186 +	return IA64_NO_FAULT;
  9.2187  }
  9.2188  
  9.2189 -IA64FAULT vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  9.2190 +IA64FAULT vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 * pval)
  9.2191  {
  9.2192  	// TODO: unimplemented IBRs return a reserved register fault
  9.2193 -	UINT64 val = ia64_get_ibr(reg);
  9.2194 +	u64 val = ia64_get_ibr(reg);
  9.2195  	*pval = val;
  9.2196 -	return (IA64_NO_FAULT);
  9.2197 +	return IA64_NO_FAULT;
  9.2198  }
  9.2199  
  9.2200  /**************************************************************************
  9.2201   VCPU performance monitor register access routines
  9.2202  **************************************************************************/
  9.2203  
  9.2204 -IA64FAULT vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
  9.2205 +IA64FAULT vcpu_set_pmc(VCPU * vcpu, u64 reg, u64 val)
  9.2206  {
  9.2207  	// TODO: Should set Logical CPU state, not just physical
  9.2208  	// NOTE: Writes to unimplemented PMC registers are discarded
  9.2209  #ifdef DEBUG_PFMON
  9.2210 -printf("vcpu_set_pmc(%x,%lx)\n",reg,val);
  9.2211 +	printf("vcpu_set_pmc(%x,%lx)\n", reg, val);
  9.2212  #endif
  9.2213 -	ia64_set_pmc(reg,val);
  9.2214 -	return (IA64_NO_FAULT);
  9.2215 +	ia64_set_pmc(reg, val);
  9.2216 +	return IA64_NO_FAULT;
  9.2217  }
  9.2218  
  9.2219 -IA64FAULT vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
  9.2220 +IA64FAULT vcpu_set_pmd(VCPU * vcpu, u64 reg, u64 val)
  9.2221  {
  9.2222  	// TODO: Should set Logical CPU state, not just physical
  9.2223  	// NOTE: Writes to unimplemented PMD registers are discarded
  9.2224  #ifdef DEBUG_PFMON
  9.2225 -printf("vcpu_set_pmd(%x,%lx)\n",reg,val);
  9.2226 +	printf("vcpu_set_pmd(%x,%lx)\n", reg, val);
  9.2227  #endif
  9.2228 -	ia64_set_pmd(reg,val);
  9.2229 -	return (IA64_NO_FAULT);
  9.2230 +	ia64_set_pmd(reg, val);
  9.2231 +	return IA64_NO_FAULT;
  9.2232  }
  9.2233  
  9.2234 -IA64FAULT vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  9.2235 +IA64FAULT vcpu_get_pmc(VCPU * vcpu, u64 reg, u64 * pval)
  9.2236  {
  9.2237  	// NOTE: Reads from unimplemented PMC registers return zero
  9.2238 -	UINT64 val = (UINT64)ia64_get_pmc(reg);
  9.2239 +	u64 val = (u64) ia64_get_pmc(reg);
  9.2240  #ifdef DEBUG_PFMON
  9.2241 -printf("%lx=vcpu_get_pmc(%x)\n",val,reg);
  9.2242 +	printf("%lx=vcpu_get_pmc(%x)\n", val, reg);
  9.2243  #endif
  9.2244  	*pval = val;
  9.2245 -	return (IA64_NO_FAULT);
  9.2246 +	return IA64_NO_FAULT;
  9.2247  }
  9.2248  
  9.2249 -IA64FAULT vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  9.2250 +IA64FAULT vcpu_get_pmd(VCPU * vcpu, u64 reg, u64 * pval)
  9.2251  {
  9.2252  	// NOTE: Reads from unimplemented PMD registers return zero
  9.2253 -	UINT64 val = (UINT64)ia64_get_pmd(reg);
  9.2254 +	u64 val = (u64) ia64_get_pmd(reg);
  9.2255  #ifdef DEBUG_PFMON
  9.2256 -printf("%lx=vcpu_get_pmd(%x)\n",val,reg);
  9.2257 +	printf("%lx=vcpu_get_pmd(%x)\n", val, reg);
  9.2258  #endif
  9.2259  	*pval = val;
  9.2260 -	return (IA64_NO_FAULT);
  9.2261 +	return IA64_NO_FAULT;
  9.2262  }
  9.2263  
  9.2264  /**************************************************************************
  9.2265 @@ -1718,167 +1836,183 @@ do{     \
  9.2266          "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory");    \
  9.2267  }while(0)
  9.2268  
  9.2269 -IA64FAULT vcpu_bsw0(VCPU *vcpu)
  9.2270 +IA64FAULT vcpu_bsw0(VCPU * vcpu)
  9.2271  {
  9.2272  	// TODO: Only allowed for current vcpu
  9.2273  	REGS *regs = vcpu_regs(vcpu);
  9.2274  	unsigned long *r = &regs->r16;
  9.2275 -	unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
  9.2276 -	unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
  9.2277 +	unsigned long *b0 = &PSCB(vcpu, bank0_regs[0]);
  9.2278 +	unsigned long *b1 = &PSCB(vcpu, bank1_regs[0]);
  9.2279  	unsigned long *runat = &regs->eml_unat;
  9.2280 -	unsigned long *b0unat = &PSCB(vcpu,vbnat);
  9.2281 -	unsigned long *b1unat = &PSCB(vcpu,vnat);
  9.2282 +	unsigned long *b0unat = &PSCB(vcpu, vbnat);
  9.2283 +	unsigned long *b1unat = &PSCB(vcpu, vnat);
  9.2284  
  9.2285  	unsigned long i;
  9.2286  
  9.2287 -    if(VMX_DOMAIN(vcpu)){
  9.2288 -        if(VCPU(vcpu,vpsr)&IA64_PSR_BN){
  9.2289 -            for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
  9.2290 -            vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
  9.2291 -            VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
  9.2292 -        }
  9.2293 -    }else{
  9.2294 -        if (PSCB(vcpu,banknum)) {
  9.2295 -            for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
  9.2296 -            vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
  9.2297 -            PSCB(vcpu,banknum) = 0;
  9.2298 -        }
  9.2299 -    }
  9.2300 -	return (IA64_NO_FAULT);
  9.2301 +	if (VMX_DOMAIN(vcpu)) {
  9.2302 +		if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
  9.2303 +			for (i = 0; i < 16; i++) {
  9.2304 +				*b1++ = *r;
  9.2305 +				*r++ = *b0++;
  9.2306 +			}
  9.2307 +			vcpu_bsw0_unat(i, b0unat, b1unat, runat,
  9.2308 +				       IA64_PT_REGS_R16_SLOT);
  9.2309 +			VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
  9.2310 +		}
  9.2311 +	} else {
  9.2312 +		if (PSCB(vcpu, banknum)) {
  9.2313 +			for (i = 0; i < 16; i++) {
  9.2314 +				*b1++ = *r;
  9.2315 +				*r++ = *b0++;
  9.2316 +			}
  9.2317 +			vcpu_bsw0_unat(i, b0unat, b1unat, runat,
  9.2318 +			               IA64_PT_REGS_R16_SLOT);
  9.2319 +			PSCB(vcpu, banknum) = 0;
  9.2320 +		}
  9.2321 +	}
  9.2322 +	return IA64_NO_FAULT;
  9.2323  }
  9.2324  
  9.2325 -#define vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT)     \
  9.2326 -do{             \
  9.2327 -    __asm__ __volatile__ (      \
  9.2328 -        ";;extr.u %0 = %3,%6,16;;\n"                \
  9.2329 -        "dep %1 = %0, %1, 16, 16;;\n"               \
  9.2330 -        "st8 [%4] = %1\n"                           \
  9.2331 -        "extr.u %0 = %2, 0, 16;;\n"                 \
  9.2332 -        "dep %3 = %0, %3, %6, 16;;\n"               \
  9.2333 -        "st8 [%5] = %3\n"                           \
  9.2334 -        ::"r"(i),"r"(*b0unat),"r"(*b1unat),"r"(*runat),"r"(b0unat), \
  9.2335 -        "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory");            \
  9.2336 -}while(0)
  9.2337 +#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, IA64_PT_REGS_R16_SLOT)	\
  9.2338 +do {             							\
  9.2339 +	__asm__ __volatile__ (";;extr.u %0 = %3,%6,16;;\n"		\
  9.2340 +        		      "dep %1 = %0, %1, 16, 16;;\n"		\
  9.2341 +			      "st8 [%4] = %1\n"				\
  9.2342 +			      "extr.u %0 = %2, 0, 16;;\n"		\
  9.2343 +			      "dep %3 = %0, %3, %6, 16;;\n"		\
  9.2344 +			      "st8 [%5] = %3\n"				\
  9.2345 +			      ::"r"(i), "r"(*b0unat), "r"(*b1unat),	\
  9.2346 +			      "r"(*runat), "r"(b0unat), "r"(runat),	\
  9.2347 +			      "i"(IA64_PT_REGS_R16_SLOT): "memory");	\
  9.2348 +} while(0)
  9.2349  
  9.2350 -IA64FAULT vcpu_bsw1(VCPU *vcpu)
  9.2351 +IA64FAULT vcpu_bsw1(VCPU * vcpu)
  9.2352  {
  9.2353  	// TODO: Only allowed for current vcpu
  9.2354  	REGS *regs = vcpu_regs(vcpu);
  9.2355  	unsigned long *r = &regs->r16;
  9.2356 -	unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
  9.2357 -	unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
  9.2358 +	unsigned long *b0 = &PSCB(vcpu, bank0_regs[0]);
  9.2359 +	unsigned long *b1 = &PSCB(vcpu, bank1_regs[0]);
  9.2360  	unsigned long *runat = &regs->eml_unat;
  9.2361 -	unsigned long *b0unat = &PSCB(vcpu,vbnat);
  9.2362 -	unsigned long *b1unat = &PSCB(vcpu,vnat);
  9.2363 +	unsigned long *b0unat = &PSCB(vcpu, vbnat);
  9.2364 +	unsigned long *b1unat = &PSCB(vcpu, vnat);
  9.2365  
  9.2366  	unsigned long i;
  9.2367  
  9.2368 -    if(VMX_DOMAIN(vcpu)){
  9.2369 -        if(!(VCPU(vcpu,vpsr)&IA64_PSR_BN)){
  9.2370 -            for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
  9.2371 -            vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
  9.2372 -            VCPU(vcpu,vpsr) |= IA64_PSR_BN;
  9.2373 -        }
  9.2374 -    }else{
  9.2375 -        if (!PSCB(vcpu,banknum)) {
  9.2376 -            for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
  9.2377 -            vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
  9.2378 -            PSCB(vcpu,banknum) = 1;
  9.2379 -        }
  9.2380 -    }
  9.2381 -	return (IA64_NO_FAULT);
  9.2382 +	if (VMX_DOMAIN(vcpu)) {
  9.2383 +		if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
  9.2384 +			for (i = 0; i < 16; i++) {
  9.2385 +				*b0++ = *r;
  9.2386 +				*r++ = *b1++;
  9.2387 +			}
  9.2388 +			vcpu_bsw1_unat(i, b0unat, b1unat, runat,
  9.2389 +			               IA64_PT_REGS_R16_SLOT);
  9.2390 +			VCPU(vcpu, vpsr) |= IA64_PSR_BN;
  9.2391 +		}
  9.2392 +	} else {
  9.2393 +		if (!PSCB(vcpu, banknum)) {
  9.2394 +			for (i = 0; i < 16; i++) {
  9.2395 +				*b0++ = *r;
  9.2396 +				*r++ = *b1++;
  9.2397 +			}
  9.2398 +			vcpu_bsw1_unat(i, b0unat, b1unat, runat,
  9.2399 +			               IA64_PT_REGS_R16_SLOT);
  9.2400 +			PSCB(vcpu, banknum) = 1;
  9.2401 +		}
  9.2402 +	}
  9.2403 +	return IA64_NO_FAULT;
  9.2404  }
  9.2405  
  9.2406  /**************************************************************************
  9.2407   VCPU cpuid access routines
  9.2408  **************************************************************************/
  9.2409  
  9.2410 -
  9.2411 -IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  9.2412 +IA64FAULT vcpu_get_cpuid(VCPU * vcpu, u64 reg, u64 * pval)
  9.2413  {
  9.2414  	// FIXME: This could get called as a result of a rsvd-reg fault
  9.2415  	// if reg > 3
  9.2416 -	switch(reg) {
  9.2417 -	    case 0:
  9.2418 -		memcpy(pval,"Xen/ia64",8);
  9.2419 +	switch (reg) {
  9.2420 +	case 0:
  9.2421 +		memcpy(pval, "Xen/ia64", 8);
  9.2422  		break;
  9.2423 -	    case 1:
  9.2424 +	case 1:
  9.2425  		*pval = 0;
  9.2426  		break;
  9.2427 -	    case 2:
  9.2428 +	case 2:
  9.2429  		*pval = 0;
  9.2430  		break;
  9.2431 -	    case 3:
  9.2432 +	case 3:
  9.2433  		*pval = ia64_get_cpuid(3);
  9.2434  		break;
  9.2435 -	    case 4:
  9.2436 +	case 4:
  9.2437  		*pval = ia64_get_cpuid(4);
  9.2438  		break;
  9.2439 -	    default:
  9.2440 +	default:
  9.2441  		if (reg > (ia64_get_cpuid(3) & 0xff))
  9.2442  			return IA64_RSVDREG_FAULT;
  9.2443  		*pval = ia64_get_cpuid(reg);
  9.2444  		break;
  9.2445  	}
  9.2446 -	return (IA64_NO_FAULT);
  9.2447 +	return IA64_NO_FAULT;
  9.2448  }
  9.2449  
  9.2450  /**************************************************************************
  9.2451   VCPU region register access routines
  9.2452  **************************************************************************/
  9.2453  
  9.2454 -unsigned long vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr)
  9.2455 +unsigned long vcpu_get_rr_ve(VCPU * vcpu, u64 vadr)
  9.2456  {
  9.2457  	ia64_rr rr;
  9.2458  
  9.2459 -	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
  9.2460 -	return(rr.ve);
  9.2461 +	rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
  9.2462 +	return rr.ve;
  9.2463  }
  9.2464  
  9.2465 -IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
  9.2466 +IA64FAULT vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val)
  9.2467  {
  9.2468 -	PSCB(vcpu,rrs)[reg>>61] = val;
  9.2469 +	PSCB(vcpu, rrs)[reg >> 61] = val;
  9.2470  	// warning: set_one_rr() does it "live"
  9.2471 -	set_one_rr(reg,val);
  9.2472 -	return (IA64_NO_FAULT);
  9.2473 +	set_one_rr(reg, val);
  9.2474 +	return IA64_NO_FAULT;
  9.2475  }
  9.2476  
  9.2477 -IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  9.2478 +IA64FAULT vcpu_get_rr(VCPU * vcpu, u64 reg, u64 * pval)
  9.2479  {
  9.2480 -	if(VMX_DOMAIN(vcpu)){
  9.2481 -		*pval = VMX(vcpu,vrr[reg>>61]);
  9.2482 -	}else{
  9.2483 -		*pval = PSCB(vcpu,rrs)[reg>>61];
  9.2484 -	}
  9.2485 -	return (IA64_NO_FAULT);
  9.2486 +	if (VMX_DOMAIN(vcpu))
  9.2487 +		*pval = VMX(vcpu, vrr[reg >> 61]);
  9.2488 +	else
  9.2489 +		*pval = PSCB(vcpu, rrs)[reg >> 61];
  9.2490 +
  9.2491 +	return IA64_NO_FAULT;
  9.2492  }
  9.2493  
  9.2494  /**************************************************************************
  9.2495   VCPU protection key register access routines
  9.2496  **************************************************************************/
  9.2497  
  9.2498 -IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  9.2499 +IA64FAULT vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval)
  9.2500  {
  9.2501  #ifndef PKR_USE_FIXED
  9.2502  	printk("vcpu_get_pkr: called, not implemented yet\n");
  9.2503  	return IA64_ILLOP_FAULT;
  9.2504  #else
  9.2505 -	UINT64 val = (UINT64)ia64_get_pkr(reg);
  9.2506 +	u64 val = (u64) ia64_get_pkr(reg);
  9.2507  	*pval = val;
  9.2508 -	return (IA64_NO_FAULT);
  9.2509 +	return IA64_NO_FAULT;
  9.2510  #endif
  9.2511  }
  9.2512  
  9.2513 -IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
  9.2514 +IA64FAULT vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val)
  9.2515  {
  9.2516  #ifndef PKR_USE_FIXED
  9.2517  	printk("vcpu_set_pkr: called, not implemented yet\n");
  9.2518  	return IA64_ILLOP_FAULT;
  9.2519  #else
  9.2520 -//	if (reg >= NPKRS) return (IA64_ILLOP_FAULT);
  9.2521 +//      if (reg >= NPKRS)
  9.2522 +//		return IA64_ILLOP_FAULT;
  9.2523  	vcpu->pkrs[reg] = val;
  9.2524 -	ia64_set_pkr(reg,val);
  9.2525 -	return (IA64_NO_FAULT);
  9.2526 +	ia64_set_pkr(reg, val);
  9.2527 +	return IA64_NO_FAULT;
  9.2528  #endif
  9.2529  }
  9.2530  
  9.2531 @@ -1887,21 +2021,22 @@ IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT6
  9.2532  **************************************************************************/
  9.2533  
  9.2534  static void
  9.2535 -vcpu_set_tr_entry_rid(TR_ENTRY *trp, UINT64 pte,
  9.2536 -                      UINT64 itir, UINT64 ifa, UINT64 rid)
  9.2537 +vcpu_set_tr_entry_rid(TR_ENTRY * trp, u64 pte,
  9.2538 +                      u64 itir, u64 ifa, u64 rid)
  9.2539  {
  9.2540 -	UINT64 ps;
  9.2541 +	u64 ps;
  9.2542  	union pte_flags new_pte;
  9.2543  
  9.2544  	trp->itir = itir;
  9.2545  	trp->rid = rid;
  9.2546  	ps = trp->ps;
  9.2547  	new_pte.val = pte;
  9.2548 -	if (new_pte.pl < 2) new_pte.pl = 2;
  9.2549 +	if (new_pte.pl < 2)
  9.2550 +		new_pte.pl = 2;
  9.2551  	trp->vadr = ifa & ~0xfff;
  9.2552 -	if (ps > 12) { // "ignore" relevant low-order bits
  9.2553 -		new_pte.ppn &= ~((1UL<<(ps-12))-1);
  9.2554 -		trp->vadr &= ~((1UL<<ps)-1);
  9.2555 +	if (ps > 12) {		// "ignore" relevant low-order bits
  9.2556 +		new_pte.ppn &= ~((1UL << (ps - 12)) - 1);
  9.2557 +		trp->vadr &= ~((1UL << ps) - 1);
  9.2558  	}
  9.2559  
  9.2560  	/* Atomic write.  */
  9.2561 @@ -1909,25 +2044,26 @@ vcpu_set_tr_entry_rid(TR_ENTRY *trp, UIN
  9.2562  }
  9.2563  
  9.2564  static inline void
  9.2565 -vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
  9.2566 +vcpu_set_tr_entry(TR_ENTRY * trp, u64 pte, u64 itir, u64 ifa)
  9.2567  {
  9.2568  	vcpu_set_tr_entry_rid(trp, pte, itir, ifa,
  9.2569 -			      VCPU(current, rrs[ifa>>61]) & RR_RID_MASK);
  9.2570 +			      VCPU(current, rrs[ifa >> 61]) & RR_RID_MASK);
  9.2571  }
  9.2572  
  9.2573 -IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
  9.2574 -                     UINT64 itir, UINT64 ifa)
  9.2575 +IA64FAULT vcpu_itr_d(VCPU * vcpu, u64 slot, u64 pte,
  9.2576 +                     u64 itir, u64 ifa)
  9.2577  {
  9.2578  	TR_ENTRY *trp;
  9.2579  
  9.2580 -	if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
  9.2581 +	if (slot >= NDTRS)
  9.2582 +		return IA64_RSVDREG_FAULT;
  9.2583  
  9.2584  	vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
  9.2585  
  9.2586 -	trp = &PSCBX(vcpu,dtrs[slot]);
  9.2587 +	trp = &PSCBX(vcpu, dtrs[slot]);
  9.2588  //printf("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
  9.2589 -	vcpu_set_tr_entry(trp,pte,itir,ifa);
  9.2590 -	vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),ifa);
  9.2591 +	vcpu_set_tr_entry(trp, pte, itir, ifa);
  9.2592 +	vcpu_quick_region_set(PSCBX(vcpu, dtr_regions), ifa);
  9.2593  
  9.2594  	/*
  9.2595  	 * FIXME According to spec, vhpt should be purged, but this
  9.2596 @@ -1941,19 +2077,20 @@ IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 
  9.2597  	return IA64_NO_FAULT;
  9.2598  }
  9.2599  
  9.2600 -IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte,
  9.2601 -                     UINT64 itir, UINT64 ifa)
  9.2602 +IA64FAULT vcpu_itr_i(VCPU * vcpu, u64 slot, u64 pte,
  9.2603 +                     u64 itir, u64 ifa)
  9.2604  {
  9.2605  	TR_ENTRY *trp;
  9.2606  
  9.2607 -	if (slot >= NITRS) return IA64_RSVDREG_FAULT;
  9.2608 +	if (slot >= NITRS)
  9.2609 +		return IA64_RSVDREG_FAULT;
  9.2610  
  9.2611  	vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
  9.2612  
  9.2613 -	trp = &PSCBX(vcpu,itrs[slot]);
  9.2614 +	trp = &PSCBX(vcpu, itrs[slot]);
  9.2615  //printf("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
  9.2616 -	vcpu_set_tr_entry(trp,pte,itir,ifa);
  9.2617 -	vcpu_quick_region_set(PSCBX(vcpu,itr_regions),ifa);
  9.2618 +	vcpu_set_tr_entry(trp, pte, itir, ifa);
  9.2619 +	vcpu_quick_region_set(PSCBX(vcpu, itr_regions), ifa);
  9.2620  
  9.2621  	/*
  9.2622  	 * FIXME According to spec, vhpt should be purged, but this
  9.2623 @@ -1967,13 +2104,13 @@ IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 
  9.2624  	return IA64_NO_FAULT;
  9.2625  }
  9.2626  
  9.2627 -IA64FAULT vcpu_set_itr(VCPU *vcpu, u64 slot, u64 pte,
  9.2628 +IA64FAULT vcpu_set_itr(VCPU * vcpu, u64 slot, u64 pte,
  9.2629                         u64 itir, u64 ifa, u64 rid)
  9.2630  {
  9.2631  	TR_ENTRY *trp;
  9.2632  
  9.2633  	if (slot >= NITRS)
  9.2634 - 		return IA64_RSVDREG_FAULT;
  9.2635 +		return IA64_RSVDREG_FAULT;
  9.2636  	trp = &PSCBX(vcpu, itrs[slot]);
  9.2637  	vcpu_set_tr_entry_rid(trp, pte, itir, ifa, rid);
  9.2638  
  9.2639 @@ -1986,7 +2123,7 @@ IA64FAULT vcpu_set_itr(VCPU *vcpu, u64 s
  9.2640  	return IA64_NO_FAULT;
  9.2641  }
  9.2642  
  9.2643 -IA64FAULT vcpu_set_dtr(VCPU *vcpu, u64 slot, u64 pte,
  9.2644 +IA64FAULT vcpu_set_dtr(VCPU * vcpu, u64 slot, u64 pte,
  9.2645                         u64 itir, u64 ifa, u64 rid)
  9.2646  {
  9.2647  	TR_ENTRY *trp;
  9.2648 @@ -2010,65 +2147,71 @@ IA64FAULT vcpu_set_dtr(VCPU *vcpu, u64 s
  9.2649  **************************************************************************/
  9.2650  
  9.2651  void
  9.2652 -vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte,
  9.2653 -                 UINT64 mp_pte, UINT64 logps, struct p2m_entry* entry)
  9.2654 +vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, u64 vaddr, u64 pte,
  9.2655 +                 u64 mp_pte, u64 logps, struct p2m_entry *entry)
  9.2656  {
  9.2657  	unsigned long psr;
  9.2658 -	unsigned long ps = (vcpu->domain==dom0) ? logps : PAGE_SHIFT;
  9.2659 +	unsigned long ps = (vcpu->domain == dom0) ? logps : PAGE_SHIFT;
  9.2660  
  9.2661 -	check_xen_space_overlap ("itc", vaddr, 1UL << logps);
  9.2662 +	check_xen_space_overlap("itc", vaddr, 1UL << logps);
  9.2663  
  9.2664  	// FIXME, must be inlined or potential for nested fault here!
  9.2665 -	if ((vcpu->domain==dom0) && (logps < PAGE_SHIFT))
  9.2666 -		panic_domain (NULL, "vcpu_itc_no_srlz: domain trying to use "
  9.2667 - 			      "smaller page size!\n");
  9.2668 +	if ((vcpu->domain == dom0) && (logps < PAGE_SHIFT))
  9.2669 +		panic_domain(NULL, "vcpu_itc_no_srlz: domain trying to use "
  9.2670 +		             "smaller page size!\n");
  9.2671  
  9.2672  	BUG_ON(logps > PAGE_SHIFT);
  9.2673  	vcpu_tlb_track_insert_or_dirty(vcpu, vaddr, entry);
  9.2674  	psr = ia64_clear_ic();
  9.2675 -	ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
  9.2676 +	ia64_itc(IorD, vaddr, pte, ps);	// FIXME: look for bigger mappings
  9.2677  	ia64_set_psr(psr);
  9.2678  	// ia64_srlz_i(); // no srls req'd, will rfi later
  9.2679  #ifdef VHPT_GLOBAL
  9.2680 -	if (vcpu->domain==dom0 && ((vaddr >> 61) == 7)) {
  9.2681 +	if (vcpu->domain == dom0 && ((vaddr >> 61) == 7)) {
  9.2682  		// FIXME: this is dangerous... vhpt_flush_address ensures these
  9.2683  		// addresses never get flushed.  More work needed if this
  9.2684  		// ever happens.
  9.2685  //printf("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
  9.2686 -		if (logps > PAGE_SHIFT) vhpt_multiple_insert(vaddr,pte,logps);
  9.2687 -		else vhpt_insert(vaddr,pte,logps<<2);
  9.2688 +		if (logps > PAGE_SHIFT)
  9.2689 +			vhpt_multiple_insert(vaddr, pte, logps);
  9.2690 +		else
  9.2691 +			vhpt_insert(vaddr, pte, logps << 2);
  9.2692  	}
  9.2693  	// even if domain pagesize is larger than PAGE_SIZE, just put
  9.2694  	// PAGE_SIZE mapping in the vhpt for now, else purging is complicated
  9.2695 -	else vhpt_insert(vaddr,pte,PAGE_SHIFT<<2);
  9.2696 +	else
  9.2697 +		vhpt_insert(vaddr, pte, PAGE_SHIFT << 2);
  9.2698  #endif
  9.2699 -	if (IorD & 0x4) /* don't place in 1-entry TLB */
  9.2700 +	if (IorD & 0x4)		/* don't place in 1-entry TLB */
  9.2701  		return;
  9.2702  	if (IorD & 0x1) {
  9.2703 -		vcpu_set_tr_entry(&PSCBX(vcpu,itlb),mp_pte,ps<<2,vaddr);
  9.2704 +		vcpu_set_tr_entry(&PSCBX(vcpu, itlb), mp_pte, ps << 2, vaddr);
  9.2705  	}
  9.2706  	if (IorD & 0x2) {
  9.2707 -		vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),mp_pte,ps<<2,vaddr);
  9.2708 +		vcpu_set_tr_entry(&PSCBX(vcpu, dtlb), mp_pte, ps << 2, vaddr);
  9.2709  	}
  9.2710  }
  9.2711  
  9.2712 -IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
  9.2713 +IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
  9.2714  {
  9.2715  	unsigned long pteval, logps = itir_ps(itir);
  9.2716 -	BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
  9.2717 +	BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
  9.2718  	struct p2m_entry entry;
  9.2719  
  9.2720  	if (logps < PAGE_SHIFT)
  9.2721 -		panic_domain (NULL, "vcpu_itc_d: domain trying to use "
  9.2722 - 			      "smaller page size!\n");
  9.2723 +		panic_domain(NULL, "vcpu_itc_d: domain trying to use "
  9.2724 +		             "smaller page size!\n");
  9.2725  
  9.2726 -again:
  9.2727 + again:
  9.2728  	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
  9.2729  	pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry);
  9.2730 -	if (!pteval) return IA64_ILLOP_FAULT;
  9.2731 -	if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
  9.2732 +	if (!pteval)
  9.2733 +		return IA64_ILLOP_FAULT;
  9.2734 +	if (swap_rr0)
  9.2735 +		set_one_rr(0x0, PSCB(vcpu, rrs[0]));
  9.2736  	vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, logps, &entry);
  9.2737 -	if (swap_rr0) set_metaphysical_rr0();
  9.2738 +	if (swap_rr0)
  9.2739 +		set_metaphysical_rr0();
  9.2740  	if (p2m_entry_retry(&entry)) {
  9.2741  		vcpu_flush_tlb_vhpt_range(ifa, logps);
  9.2742  		goto again;
  9.2743 @@ -2076,22 +2219,25 @@ again:
  9.2744  	return IA64_NO_FAULT;
  9.2745  }
  9.2746  
  9.2747 -IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
  9.2748 +IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
  9.2749  {
  9.2750  	unsigned long pteval, logps = itir_ps(itir);
  9.2751 -	BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
  9.2752 +	BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
  9.2753  	struct p2m_entry entry;
  9.2754  
  9.2755  	if (logps < PAGE_SHIFT)
  9.2756 -		panic_domain (NULL, "vcpu_itc_i: domain trying to use "
  9.2757 - 			      "smaller page size!\n");
  9.2758 -again:
  9.2759 +		panic_domain(NULL, "vcpu_itc_i: domain trying to use "
  9.2760 +		             "smaller page size!\n");
  9.2761 +      again:
  9.2762  	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
  9.2763  	pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry);
  9.2764 -	if (!pteval) return IA64_ILLOP_FAULT;
  9.2765 -	if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
  9.2766 +	if (!pteval)
  9.2767 +		return IA64_ILLOP_FAULT;
  9.2768 +	if (swap_rr0)
  9.2769 +		set_one_rr(0x0, PSCB(vcpu, rrs[0]));
  9.2770  	vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, logps, &entry);
  9.2771 -	if (swap_rr0) set_metaphysical_rr0();
  9.2772 +	if (swap_rr0)
  9.2773 +		set_metaphysical_rr0();
  9.2774  	if (p2m_entry_retry(&entry)) {
  9.2775  		vcpu_flush_tlb_vhpt_range(ifa, logps);
  9.2776  		goto again;
  9.2777 @@ -2099,18 +2245,18 @@ again:
  9.2778  	return IA64_NO_FAULT;
  9.2779  }
  9.2780  
  9.2781 -IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 log_range)
  9.2782 +IA64FAULT vcpu_ptc_l(VCPU * vcpu, u64 vadr, u64 log_range)
  9.2783  {
  9.2784  	BUG_ON(vcpu != current);
  9.2785  
  9.2786 -	check_xen_space_overlap ("ptc_l", vadr, 1UL << log_range);
  9.2787 +	check_xen_space_overlap("ptc_l", vadr, 1UL << log_range);
  9.2788  
  9.2789  	/* Purge TC  */
  9.2790 -	vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
  9.2791 -	vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
  9.2792 -	
  9.2793 +	vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
  9.2794 +	vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
  9.2795 +
  9.2796  	/* Purge all tlb and vhpt */
  9.2797 -	vcpu_flush_tlb_vhpt_range (vadr, log_range);
  9.2798 +	vcpu_flush_tlb_vhpt_range(vadr, log_range);
  9.2799  
  9.2800  	return IA64_NO_FAULT;
  9.2801  }
  9.2802 @@ -2121,13 +2267,13 @@ IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 
  9.2803  // access rights fault, we have to translate the virtual address to a
  9.2804  // physical address (possibly via a metaphysical address) and do the fc
  9.2805  // on the physical address, which is guaranteed to flush the same cache line
  9.2806 -IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
  9.2807 +IA64FAULT vcpu_fc(VCPU * vcpu, u64 vadr)
  9.2808  {
  9.2809  	// TODO: Only allowed for current vcpu
  9.2810 -	UINT64 mpaddr, paddr;
  9.2811 +	u64 mpaddr, paddr;
  9.2812  	IA64FAULT fault;
  9.2813  
  9.2814 -again:
  9.2815 +      again:
  9.2816  	fault = vcpu_tpa(vcpu, vadr, &mpaddr);
  9.2817  	if (fault == IA64_NO_FAULT) {
  9.2818  		struct p2m_entry entry;
  9.2819 @@ -2139,7 +2285,7 @@ again:
  9.2820  	return fault;
  9.2821  }
  9.2822  
  9.2823 -IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
  9.2824 +IA64FAULT vcpu_ptc_e(VCPU * vcpu, u64 vadr)
  9.2825  {
  9.2826  	// Note that this only needs to be called once, i.e. the
  9.2827  	// architected loop to purge the entire TLB, should use
  9.2828 @@ -2150,27 +2296,27 @@ IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 
  9.2829  	return IA64_NO_FAULT;
  9.2830  }
  9.2831  
  9.2832 -IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
  9.2833 +IA64FAULT vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 addr_range)
  9.2834  {
  9.2835  	printk("vcpu_ptc_g: called, not implemented yet\n");
  9.2836  	return IA64_ILLOP_FAULT;
  9.2837  }
  9.2838  
  9.2839 -IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
  9.2840 +IA64FAULT vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 addr_range)
  9.2841  {
  9.2842  	// FIXME: validate not flushing Xen addresses
  9.2843  	// if (Xen address) return(IA64_ILLOP_FAULT);
  9.2844  	// FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
  9.2845  //printf("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
  9.2846  
  9.2847 -	check_xen_space_overlap ("ptc_ga", vadr, addr_range);
  9.2848 +	check_xen_space_overlap("ptc_ga", vadr, addr_range);
  9.2849  
  9.2850 -	domain_flush_vtlb_range (vcpu->domain, vadr, addr_range);
  9.2851 +	domain_flush_vtlb_range(vcpu->domain, vadr, addr_range);
  9.2852  
  9.2853  	return IA64_NO_FAULT;
  9.2854  }
  9.2855  
  9.2856 -IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 log_range)
  9.2857 +IA64FAULT vcpu_ptr_d(VCPU * vcpu, u64 vadr, u64 log_range)
  9.2858  {
  9.2859  	unsigned long region = vadr >> 61;
  9.2860  	u64 addr_range = 1UL << log_range;
  9.2861 @@ -2179,29 +2325,30 @@ IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 v
  9.2862  	TR_ENTRY *trp;
  9.2863  
  9.2864  	BUG_ON(vcpu != current);
  9.2865 -	check_xen_space_overlap ("ptr_d", vadr, 1UL << log_range);
  9.2866 +	check_xen_space_overlap("ptr_d", vadr, 1UL << log_range);
  9.2867  
  9.2868 -	rr = PSCB(vcpu,rrs)[region];
  9.2869 +	rr = PSCB(vcpu, rrs)[region];
  9.2870  	rid = rr & RR_RID_MASK;
  9.2871  
  9.2872  	/* Purge TC  */
  9.2873 -	vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
  9.2874 +	vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
  9.2875  
  9.2876  	/* Purge tr and recompute dtr_regions.  */
  9.2877  	vcpu->arch.dtr_regions = 0;
  9.2878  	for (trp = vcpu->arch.dtrs, i = NDTRS; i; i--, trp++)
  9.2879 -		if (vcpu_match_tr_entry_range (trp,rid, vadr, vadr+addr_range))
  9.2880 +		if (vcpu_match_tr_entry_range
  9.2881 +		    (trp, rid, vadr, vadr + addr_range))
  9.2882  			vcpu_purge_tr_entry(trp);
  9.2883  		else if (trp->pte.p)
  9.2884  			vcpu_quick_region_set(vcpu->arch.dtr_regions,
  9.2885  					      trp->vadr);
  9.2886  
  9.2887 -	vcpu_flush_tlb_vhpt_range (vadr, log_range);
  9.2888 +	vcpu_flush_tlb_vhpt_range(vadr, log_range);
  9.2889  
  9.2890  	return IA64_NO_FAULT;
  9.2891  }
  9.2892  
  9.2893 -IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 log_range)
  9.2894 +IA64FAULT vcpu_ptr_i(VCPU * vcpu, u64 vadr, u64 log_range)
  9.2895  {
  9.2896  	unsigned long region = vadr >> 61;
  9.2897  	u64 addr_range = 1UL << log_range;
  9.2898 @@ -2210,24 +2357,25 @@ IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 v
  9.2899  	TR_ENTRY *trp;
  9.2900  
  9.2901  	BUG_ON(vcpu != current);
  9.2902 -	check_xen_space_overlap ("ptr_i", vadr, 1UL << log_range);
  9.2903 +	check_xen_space_overlap("ptr_i", vadr, 1UL << log_range);
  9.2904  
  9.2905 -	rr = PSCB(vcpu,rrs)[region];
  9.2906 +	rr = PSCB(vcpu, rrs)[region];
  9.2907  	rid = rr & RR_RID_MASK;
  9.2908  
  9.2909  	/* Purge TC  */
  9.2910 -	vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
  9.2911 +	vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
  9.2912  
  9.2913  	/* Purge tr and recompute itr_regions.  */
  9.2914  	vcpu->arch.itr_regions = 0;
  9.2915  	for (trp = vcpu->arch.itrs, i = NITRS; i; i--, trp++)
  9.2916 -		if (vcpu_match_tr_entry_range (trp,rid, vadr, vadr+addr_range))
  9.2917 +		if (vcpu_match_tr_entry_range
  9.2918 +		    (trp, rid, vadr, vadr + addr_range))
  9.2919  			vcpu_purge_tr_entry(trp);
  9.2920  		else if (trp->pte.p)
  9.2921  			vcpu_quick_region_set(vcpu->arch.itr_regions,
  9.2922  					      trp->vadr);
  9.2923  
  9.2924 -	vcpu_flush_tlb_vhpt_range (vadr, log_range);
  9.2925 +	vcpu_flush_tlb_vhpt_range(vadr, log_range);
  9.2926  
  9.2927  	return IA64_NO_FAULT;
  9.2928  }
    10.1 --- a/xen/arch/ia64/xen/vhpt.c	Tue Oct 17 14:30:36 2006 -0600
    10.2 +++ b/xen/arch/ia64/xen/vhpt.c	Tue Oct 17 15:43:41 2006 -0600
    10.3 @@ -22,7 +22,7 @@
    10.4  #include <asm/vmmu.h>
    10.5  
    10.6  /* Defined in tlb.c  */
    10.7 -extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
    10.8 +extern void ia64_global_tlb_purge(u64 start, u64 end, u64 nbits);
    10.9  
   10.10  extern long running_on_sim;
   10.11  
    11.1 --- a/xen/include/asm-ia64/dom_fw.h	Tue Oct 17 14:30:36 2006 -0600
    11.2 +++ b/xen/include/asm-ia64/dom_fw.h	Tue Oct 17 15:43:41 2006 -0600
    11.3 @@ -180,7 +180,7 @@
    11.4  
    11.5  #define EFI_MEMDESC_VERSION		1
    11.6  
    11.7 -extern struct ia64_pal_retval xen_pal_emulator(UINT64, u64, u64, u64);
    11.8 +extern struct ia64_pal_retval xen_pal_emulator(u64, u64, u64, u64);
    11.9  extern struct sal_ret_values sal_emulator (long index, unsigned long in1, unsigned long in2, unsigned long in3, unsigned long in4, unsigned long in5, unsigned long in6, unsigned long in7);
   11.10  extern struct ia64_pal_retval pal_emulator_static (unsigned long);
   11.11  extern efi_status_t efi_emulator (struct pt_regs *regs, unsigned long *fault);
    12.1 --- a/xen/include/asm-ia64/privop.h	Tue Oct 17 14:30:36 2006 -0600
    12.2 +++ b/xen/include/asm-ia64/privop.h	Tue Oct 17 15:43:41 2006 -0600
    12.3 @@ -4,9 +4,9 @@
    12.4  #include <asm/ia64_int.h>
    12.5  #include <asm/vcpu.h>
    12.6  
    12.7 -extern IA64FAULT priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr);
    12.8 +extern IA64FAULT priv_emulate(VCPU *vcpu, REGS *regs, u64 isr);
    12.9  
   12.10 -extern void privify_memory(void *start, UINT64 len);
   12.11 +extern void privify_memory(void *start, u64 len);
   12.12  
   12.13  extern int ia64_hyperprivop(unsigned long iim, REGS *regs);
   12.14  
    13.1 --- a/xen/include/asm-ia64/vcpu.h	Tue Oct 17 14:30:36 2006 -0600
    13.2 +++ b/xen/include/asm-ia64/vcpu.h	Tue Oct 17 15:43:41 2006 -0600
    13.3 @@ -10,195 +10,194 @@
    13.4  #include <asm/ia64_int.h>
    13.5  #include <xen/types.h>
    13.6  #include <public/xen.h>
    13.7 -typedef	unsigned long UINT64;
    13.8 -typedef	unsigned int UINT;
    13.9 -typedef	int BOOLEAN;
   13.10 +typedef int BOOLEAN;
   13.11  struct vcpu;
   13.12 -typedef	struct vcpu VCPU;
   13.13 +typedef struct vcpu VCPU;
   13.14  typedef cpu_user_regs_t REGS;
   13.15  extern u64 cycle_to_ns(u64 cycle);
   13.16  
   13.17  /* Note: PSCB stands for Privilegied State Communication Block.  */
   13.18  #define VCPU(_v,_x)	(_v->arch.privregs->_x)
   13.19 -#define PSCB(_v,_x) VCPU(_v,_x)
   13.20 -#define PSCBX(_v,_x) (_v->arch._x)
   13.21 +#define PSCB(_v,_x)	VCPU(_v,_x)
   13.22 +#define PSCBX(_v,_x)	(_v->arch._x)
   13.23  
   13.24  #define SPURIOUS_VECTOR 0xf
   13.25  
   13.26  /* general registers */
   13.27 -extern UINT64 vcpu_get_gr(VCPU *vcpu, unsigned long reg);
   13.28 -extern IA64FAULT vcpu_get_gr_nat(VCPU *vcpu, unsigned long reg, UINT64 *val);
   13.29 -extern IA64FAULT vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value, int nat);
   13.30 -extern IA64FAULT vcpu_get_fpreg(VCPU *vcpu, unsigned long reg, struct ia64_fpreg *val);
   13.31 +extern u64 vcpu_get_gr(VCPU * vcpu, unsigned long reg);
   13.32 +extern IA64FAULT vcpu_get_gr_nat(VCPU * vcpu, unsigned long reg, u64 * val);
   13.33 +extern IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value,
   13.34 +                             int nat);
   13.35 +extern IA64FAULT vcpu_get_fpreg(VCPU * vcpu, unsigned long reg,
   13.36 +                                struct ia64_fpreg *val);
   13.37  
   13.38 -extern IA64FAULT vcpu_set_fpreg(VCPU *vcpu, unsigned long reg, struct ia64_fpreg *val);
   13.39 +extern IA64FAULT vcpu_set_fpreg(VCPU * vcpu, unsigned long reg,
   13.40 +                                struct ia64_fpreg *val);
   13.41  
   13.42  /* application registers */
   13.43 -extern void vcpu_load_kernel_regs(VCPU *vcpu);
   13.44 -extern IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val);
   13.45 -extern IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val);
   13.46 +extern void vcpu_load_kernel_regs(VCPU * vcpu);
   13.47 +extern IA64FAULT vcpu_set_ar(VCPU * vcpu, u64 reg, u64 val);
   13.48 +extern IA64FAULT vcpu_get_ar(VCPU * vcpu, u64 reg, u64 * val);
   13.49  /* psr */
   13.50 -extern BOOLEAN vcpu_get_psr_ic(VCPU *vcpu);
   13.51 -extern UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr);
   13.52 -extern IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval);
   13.53 -extern IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm);
   13.54 -extern IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm);
   13.55 -extern IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
   13.56 -extern IA64FAULT vcpu_set_psr_i(VCPU *vcpu);
   13.57 -extern IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu);
   13.58 -extern IA64FAULT vcpu_set_psr_dt(VCPU *vcpu);
   13.59 +extern BOOLEAN vcpu_get_psr_ic(VCPU * vcpu);
   13.60 +extern u64 vcpu_get_ipsr_int_state(VCPU * vcpu, u64 prevpsr);
   13.61 +extern IA64FAULT vcpu_get_psr(VCPU * vcpu, u64 * pval);
   13.62 +extern IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu, u64 imm);
   13.63 +extern IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm);
   13.64 +extern IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u64 val);
   13.65 +extern IA64FAULT vcpu_set_psr_i(VCPU * vcpu);
   13.66 +extern IA64FAULT vcpu_reset_psr_dt(VCPU * vcpu);
   13.67 +extern IA64FAULT vcpu_set_psr_dt(VCPU * vcpu);
   13.68  /* control registers */
   13.69 -extern IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val);
   13.70 -extern IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val);
   13.71 -extern IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val);
   13.72 -extern IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val);
   13.73 -extern IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val);
   13.74 -extern IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val);
   13.75 -extern IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val);
   13.76 -extern IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val);
   13.77 -extern IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val);
   13.78 -extern IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val);
   13.79 -extern IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val);
   13.80 -extern IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val);
   13.81 -extern IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val);
   13.82 -extern IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val);
   13.83 -extern IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val);
   13.84 -extern IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val);
   13.85 -extern IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val);
   13.86 -extern IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val);
   13.87 -extern IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval);
   13.88 -extern IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval);
   13.89 -extern IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval);
   13.90 -extern IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval);
   13.91 -extern IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval);
   13.92 -extern IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval);
   13.93 -extern IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval);
   13.94 -extern IA64FAULT vcpu_increment_iip(VCPU *vcpu);
   13.95 -extern IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval);
   13.96 -extern IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval);
   13.97 -extern unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa);
   13.98 -extern IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval);
   13.99 -extern IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval);
  13.100 -extern IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval);
  13.101 -extern IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval);
  13.102 -extern IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval);
  13.103 -extern IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval);
  13.104 -extern IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval);
  13.105 -extern IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval);
  13.106 -extern IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval);
  13.107 -extern IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval);
  13.108 -extern IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval);
  13.109 -extern IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval);
  13.110 +extern IA64FAULT vcpu_set_dcr(VCPU * vcpu, u64 val);
  13.111 +extern IA64FAULT vcpu_set_itm(VCPU * vcpu, u64 val);
  13.112 +extern IA64FAULT vcpu_set_iva(VCPU * vcpu, u64 val);
  13.113 +extern IA64FAULT vcpu_set_pta(VCPU * vcpu, u64 val);
  13.114 +extern IA64FAULT vcpu_set_ipsr(VCPU * vcpu, u64 val);
  13.115 +extern IA64FAULT vcpu_set_isr(VCPU * vcpu, u64 val);
  13.116 +extern IA64FAULT vcpu_set_iip(VCPU * vcpu, u64 val);
  13.117 +extern IA64FAULT vcpu_set_ifa(VCPU * vcpu, u64 val);
  13.118 +extern IA64FAULT vcpu_set_itir(VCPU * vcpu, u64 val);
  13.119 +extern IA64FAULT vcpu_set_iipa(VCPU * vcpu, u64 val);
  13.120 +extern IA64FAULT vcpu_set_ifs(VCPU * vcpu, u64 val);
  13.121 +extern IA64FAULT vcpu_set_iim(VCPU * vcpu, u64 val);
  13.122 +extern IA64FAULT vcpu_set_iha(VCPU * vcpu, u64 val);
  13.123 +extern IA64FAULT vcpu_set_lid(VCPU * vcpu, u64 val);
  13.124 +extern IA64FAULT vcpu_set_tpr(VCPU * vcpu, u64 val);
  13.125 +extern IA64FAULT vcpu_set_eoi(VCPU * vcpu, u64 val);
  13.126 +extern IA64FAULT vcpu_set_lrr0(VCPU * vcpu, u64 val);
  13.127 +extern IA64FAULT vcpu_set_lrr1(VCPU * vcpu, u64 val);
  13.128 +extern IA64FAULT vcpu_get_dcr(VCPU * vcpu, u64 * pval);
  13.129 +extern IA64FAULT vcpu_get_itm(VCPU * vcpu, u64 * pval);
  13.130 +extern IA64FAULT vcpu_get_iva(VCPU * vcpu, u64 * pval);
  13.131 +extern IA64FAULT vcpu_get_pta(VCPU * vcpu, u64 * pval);
  13.132 +extern IA64FAULT vcpu_get_ipsr(VCPU * vcpu, u64 * pval);
  13.133 +extern IA64FAULT vcpu_get_isr(VCPU * vcpu, u64 * pval);
  13.134 +extern IA64FAULT vcpu_get_iip(VCPU * vcpu, u64 * pval);
  13.135 +extern IA64FAULT vcpu_increment_iip(VCPU * vcpu);
  13.136 +extern IA64FAULT vcpu_get_ifa(VCPU * vcpu, u64 * pval);
  13.137 +extern IA64FAULT vcpu_get_itir(VCPU * vcpu, u64 * pval);
  13.138 +extern unsigned long vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa);
  13.139 +extern IA64FAULT vcpu_get_iipa(VCPU * vcpu, u64 * pval);
  13.140 +extern IA64FAULT vcpu_get_ifs(VCPU * vcpu, u64 * pval);
  13.141 +extern IA64FAULT vcpu_get_iim(VCPU * vcpu, u64 * pval);
  13.142 +extern IA64FAULT vcpu_get_iha(VCPU * vcpu, u64 * pval);
  13.143 +extern IA64FAULT vcpu_get_lid(VCPU * vcpu, u64 * pval);
  13.144 +extern IA64FAULT vcpu_get_tpr(VCPU * vcpu, u64 * pval);
  13.145 +extern IA64FAULT vcpu_get_irr0(VCPU * vcpu, u64 * pval);
  13.146 +extern IA64FAULT vcpu_get_irr1(VCPU * vcpu, u64 * pval);
  13.147 +extern IA64FAULT vcpu_get_irr2(VCPU * vcpu, u64 * pval);
  13.148 +extern IA64FAULT vcpu_get_irr3(VCPU * vcpu, u64 * pval);
  13.149 +extern IA64FAULT vcpu_get_lrr0(VCPU * vcpu, u64 * pval);
  13.150 +extern IA64FAULT vcpu_get_lrr1(VCPU * vcpu, u64 * pval);
  13.151  /* interrupt registers */
  13.152 -extern void vcpu_pend_unspecified_interrupt(VCPU *vcpu);
  13.153 -extern UINT64 vcpu_check_pending_interrupts(VCPU *vcpu);
  13.154 -extern IA64FAULT vcpu_get_itv(VCPU *vcpu,UINT64 *pval);
  13.155 -extern IA64FAULT vcpu_get_pmv(VCPU *vcpu,UINT64 *pval);
  13.156 -extern IA64FAULT vcpu_get_cmcv(VCPU *vcpu,UINT64 *pval);
  13.157 -extern IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval);
  13.158 -extern IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val);
  13.159 -extern IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val);
  13.160 -extern IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val);
  13.161 +extern void vcpu_pend_unspecified_interrupt(VCPU * vcpu);
  13.162 +extern u64 vcpu_check_pending_interrupts(VCPU * vcpu);
  13.163 +extern IA64FAULT vcpu_get_itv(VCPU * vcpu, u64 * pval);
  13.164 +extern IA64FAULT vcpu_get_pmv(VCPU * vcpu, u64 * pval);
  13.165 +extern IA64FAULT vcpu_get_cmcv(VCPU * vcpu, u64 * pval);
  13.166 +extern IA64FAULT vcpu_get_ivr(VCPU * vcpu, u64 * pval);
  13.167 +extern IA64FAULT vcpu_set_itv(VCPU * vcpu, u64 val);
  13.168 +extern IA64FAULT vcpu_set_pmv(VCPU * vcpu, u64 val);
  13.169 +extern IA64FAULT vcpu_set_cmcv(VCPU * vcpu, u64 val);
  13.170  /* interval timer registers */
  13.171 -extern IA64FAULT vcpu_set_itc(VCPU *vcpu,UINT64 val);
  13.172 -extern UINT64 vcpu_timer_pending_early(VCPU *vcpu);
  13.173 +extern IA64FAULT vcpu_set_itc(VCPU * vcpu, u64 val);
  13.174 +extern u64 vcpu_timer_pending_early(VCPU * vcpu);
  13.175  /* debug breakpoint registers */
  13.176 -extern IA64FAULT vcpu_set_ibr(VCPU *vcpu,UINT64 reg,UINT64 val);
  13.177 -extern IA64FAULT vcpu_set_dbr(VCPU *vcpu,UINT64 reg,UINT64 val);
  13.178 -extern IA64FAULT vcpu_get_ibr(VCPU *vcpu,UINT64 reg,UINT64 *pval);
  13.179 -extern IA64FAULT vcpu_get_dbr(VCPU *vcpu,UINT64 reg,UINT64 *pval);
  13.180 +extern IA64FAULT vcpu_set_ibr(VCPU * vcpu, u64 reg, u64 val);
  13.181 +extern IA64FAULT vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val);
  13.182 +extern IA64FAULT vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 * pval);
  13.183 +extern IA64FAULT vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 * pval);
  13.184  /* performance monitor registers */
  13.185 -extern IA64FAULT vcpu_set_pmc(VCPU *vcpu,UINT64 reg,UINT64 val);
  13.186 -extern IA64FAULT vcpu_set_pmd(VCPU *vcpu,UINT64 reg,UINT64 val);
  13.187 -extern IA64FAULT vcpu_get_pmc(VCPU *vcpu,UINT64 reg,UINT64 *pval);
  13.188 -extern IA64FAULT vcpu_get_pmd(VCPU *vcpu,UINT64 reg,UINT64 *pval);
  13.189 +extern IA64FAULT vcpu_set_pmc(VCPU * vcpu, u64 reg, u64 val);
  13.190 +extern IA64FAULT vcpu_set_pmd(VCPU * vcpu, u64 reg, u64 val);
  13.191 +extern IA64FAULT vcpu_get_pmc(VCPU * vcpu, u64 reg, u64 * pval);
  13.192 +extern IA64FAULT vcpu_get_pmd(VCPU * vcpu, u64 reg, u64 * pval);
  13.193  /* banked general registers */
  13.194 -extern IA64FAULT vcpu_bsw0(VCPU *vcpu);
  13.195 -extern IA64FAULT vcpu_bsw1(VCPU *vcpu);
  13.196 +extern IA64FAULT vcpu_bsw0(VCPU * vcpu);
  13.197 +extern IA64FAULT vcpu_bsw1(VCPU * vcpu);
  13.198  /* region registers */
  13.199 -extern IA64FAULT vcpu_set_rr(VCPU *vcpu,UINT64 reg,UINT64 val);
  13.200 -extern IA64FAULT vcpu_get_rr(VCPU *vcpu,UINT64 reg,UINT64 *pval);
  13.201 -extern IA64FAULT vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr);
  13.202 +extern IA64FAULT vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val);
  13.203 +extern IA64FAULT vcpu_get_rr(VCPU * vcpu, u64 reg, u64 * pval);
  13.204 +extern IA64FAULT vcpu_get_rr_ve(VCPU * vcpu, u64 vadr);
  13.205  /* protection key registers */
  13.206 -extern IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
  13.207 -extern IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
  13.208 -extern IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key);
  13.209 +extern IA64FAULT vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval);
  13.210 +extern IA64FAULT vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val);
  13.211 +extern IA64FAULT vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key);
  13.212  /* TLB */
  13.213 -static inline void vcpu_purge_tr_entry(TR_ENTRY *trp)
  13.214 +static inline void vcpu_purge_tr_entry(TR_ENTRY * trp)
  13.215  {
  13.216  	trp->pte.val = 0;
  13.217  }
  13.218 -extern IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 padr,
  13.219 -		UINT64 itir, UINT64 ifa);
  13.220 -extern IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 padr,
  13.221 -		UINT64 itir, UINT64 ifa);
  13.222 -extern IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 padr, UINT64 itir, UINT64 ifa);
  13.223 -extern IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 padr, UINT64 itir, UINT64 ifa);
  13.224 -extern IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 log_range);
  13.225 -extern IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr);
  13.226 -extern IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range);
  13.227 -extern IA64FAULT vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 addr_range);
  13.228 -extern IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr, UINT64 log_range);
  13.229 -extern IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr, UINT64 log_range);
  13.230 +extern IA64FAULT vcpu_itr_d(VCPU * vcpu, u64 slot, u64 padr, u64 itir, u64 ifa);
  13.231 +extern IA64FAULT vcpu_itr_i(VCPU * vcpu, u64 slot, u64 padr, u64 itir, u64 ifa);
  13.232 +extern IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 padr, u64 itir, u64 ifa);
  13.233 +extern IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 padr, u64 itir, u64 ifa);
  13.234 +extern IA64FAULT vcpu_ptc_l(VCPU * vcpu, u64 vadr, u64 log_range);
  13.235 +extern IA64FAULT vcpu_ptc_e(VCPU * vcpu, u64 vadr);
  13.236 +extern IA64FAULT vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 addr_range);
  13.237 +extern IA64FAULT vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 addr_range);
  13.238 +extern IA64FAULT vcpu_ptr_d(VCPU * vcpu, u64 vadr, u64 log_range);
  13.239 +extern IA64FAULT vcpu_ptr_i(VCPU * vcpu, u64 vadr, u64 log_range);
  13.240  union U_IA64_BUNDLE;
  13.241 -extern int vcpu_get_domain_bundle(VCPU *vcpu, REGS *regs, UINT64 gip, union U_IA64_BUNDLE *bundle);
  13.242 -extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data,
  13.243 -				UINT64 *pteval, UINT64 *itir, UINT64 *iha);
  13.244 -extern IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
  13.245 -extern IA64FAULT vcpu_force_inst_miss(VCPU *vcpu, UINT64 ifa);
  13.246 -extern IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa);
  13.247 -extern IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr);
  13.248 +extern int vcpu_get_domain_bundle(VCPU * vcpu, REGS * regs, u64 gip,
  13.249 +                                  union U_IA64_BUNDLE *bundle);
  13.250 +extern IA64FAULT vcpu_translate(VCPU * vcpu, u64 address, BOOLEAN is_data,
  13.251 +                                u64 * pteval, u64 * itir, u64 * iha);
  13.252 +extern IA64FAULT vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr);
  13.253 +extern IA64FAULT vcpu_force_inst_miss(VCPU * vcpu, u64 ifa);
  13.254 +extern IA64FAULT vcpu_force_data_miss(VCPU * vcpu, u64 ifa);
  13.255 +extern IA64FAULT vcpu_fc(VCPU * vcpu, u64 vadr);
  13.256  /* misc */
  13.257 -extern IA64FAULT vcpu_rfi(VCPU *vcpu);
  13.258 -extern IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
  13.259 -extern IA64FAULT vcpu_cover(VCPU *vcpu);
  13.260 -extern IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
  13.261 -extern IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval);
  13.262 +extern IA64FAULT vcpu_rfi(VCPU * vcpu);
  13.263 +extern IA64FAULT vcpu_thash(VCPU * vcpu, u64 vadr, u64 * pval);
  13.264 +extern IA64FAULT vcpu_cover(VCPU * vcpu);
  13.265 +extern IA64FAULT vcpu_ttag(VCPU * vcpu, u64 vadr, u64 * padr);
  13.266 +extern IA64FAULT vcpu_get_cpuid(VCPU * vcpu, u64 reg, u64 * pval);
  13.267  
  13.268 -extern void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector);
  13.269 -extern void vcpu_pend_timer(VCPU *vcpu);
  13.270 -extern void vcpu_poke_timer(VCPU *vcpu);
  13.271 -extern void vcpu_set_next_timer(VCPU *vcpu);
  13.272 -extern BOOLEAN vcpu_timer_expired(VCPU *vcpu);
  13.273 -extern UINT64 vcpu_deliverable_interrupts(VCPU *vcpu);
  13.274 +extern void vcpu_pend_interrupt(VCPU * vcpu, u64 vector);
  13.275 +extern void vcpu_pend_timer(VCPU * vcpu);
  13.276 +extern void vcpu_poke_timer(VCPU * vcpu);
  13.277 +extern void vcpu_set_next_timer(VCPU * vcpu);
  13.278 +extern BOOLEAN vcpu_timer_expired(VCPU * vcpu);
  13.279 +extern u64 vcpu_deliverable_interrupts(VCPU * vcpu);
  13.280  struct p2m_entry;
  13.281 -extern void vcpu_itc_no_srlz(VCPU *vcpu, UINT64, UINT64, UINT64, UINT64, UINT64, struct p2m_entry*);
  13.282 -extern UINT64 vcpu_get_tmp(VCPU *, UINT64);
  13.283 -extern void vcpu_set_tmp(VCPU *, UINT64, UINT64);
  13.284 +extern void vcpu_itc_no_srlz(VCPU * vcpu, u64, u64, u64, u64, u64,
  13.285 +                             struct p2m_entry *);
  13.286 +extern u64 vcpu_get_tmp(VCPU *, u64);
  13.287 +extern void vcpu_set_tmp(VCPU *, u64, u64);
  13.288  
  13.289 -extern IA64FAULT vcpu_set_dtr(VCPU *vcpu, u64 slot,
  13.290 +extern IA64FAULT vcpu_set_dtr(VCPU * vcpu, u64 slot,
  13.291                                u64 pte, u64 itir, u64 ifa, u64 rid);
  13.292 -extern IA64FAULT vcpu_set_itr(VCPU *vcpu, u64 slot,
  13.293 +extern IA64FAULT vcpu_set_itr(VCPU * vcpu, u64 slot,
  13.294                                u64 pte, u64 itir, u64 ifa, u64 rid);
  13.295  
  13.296  /* Initialize vcpu regs.  */
  13.297 -extern void vcpu_init_regs (struct vcpu *v);
  13.298 +extern void vcpu_init_regs(struct vcpu *v);
  13.299  
  13.300 -static inline UINT64
  13.301 -itir_ps(UINT64 itir)
  13.302 +static inline u64 itir_ps(u64 itir)
  13.303  {
  13.304 -    return ((itir >> 2) & 0x3f);
  13.305 +	return ((itir >> 2) & 0x3f);
  13.306  }
  13.307  
  13.308 -static inline UINT64
  13.309 -itir_mask(UINT64 itir)
  13.310 +static inline u64 itir_mask(u64 itir)
  13.311  {
  13.312 -    return (~((1UL << itir_ps(itir)) - 1));
  13.313 +	return (~((1UL << itir_ps(itir)) - 1));
  13.314  }
  13.315  
  13.316 -static inline s64
  13.317 -vcpu_get_next_timer_ns(VCPU *vcpu)
  13.318 +static inline s64 vcpu_get_next_timer_ns(VCPU * vcpu)
  13.319  {
  13.320 -    s64 vcpu_get_next_timer_ns;
  13.321 -    u64 d = PSCBX(vcpu, domain_itm);
  13.322 -    u64 now = ia64_get_itc();
  13.323 +	s64 vcpu_get_next_timer_ns;
  13.324 +	u64 d = PSCBX(vcpu, domain_itm);
  13.325 +	u64 now = ia64_get_itc();
  13.326  
  13.327 -    if (d > now)
  13.328 -        vcpu_get_next_timer_ns = cycle_to_ns(d - now) + NOW();
  13.329 -    else
  13.330 -        vcpu_get_next_timer_ns = cycle_to_ns(local_cpu_data->itm_delta) + NOW();
  13.331 +	if (d > now)
  13.332 +		vcpu_get_next_timer_ns = cycle_to_ns(d - now) + NOW();
  13.333 +	else
  13.334 +		vcpu_get_next_timer_ns =
  13.335 +		    cycle_to_ns(local_cpu_data->itm_delta) + NOW();
  13.336  
  13.337 -    return vcpu_get_next_timer_ns;
  13.338 +	return vcpu_get_next_timer_ns;
  13.339  }
  13.340  
  13.341  #define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
  13.342 @@ -209,5 +208,4 @@ vcpu_get_next_timer_ns(VCPU *vcpu)
  13.343  #define vcpu_quick_region_set(_tr_regions,_ifa)             \
  13.344      do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
  13.345  
  13.346 -
  13.347  #endif
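The itir_ps()/itir_mask() helpers reformatted in the hunk above extract the page-size exponent (ITIR bits 7..2) and turn it into an address mask for that page size. A small standalone check of the arithmetic, outside the patch and using only what the hunk shows:

/* Standalone illustration -- not part of this changeset. */
#include <stdint.h>
#include <stdio.h>

static uint64_t itir_ps(uint64_t itir)
{
	return (itir >> 2) & 0x3f;	/* page-size exponent, ITIR bits 7..2 */
}

static uint64_t itir_mask(uint64_t itir)
{
	return ~((1UL << itir_ps(itir)) - 1);	/* mask off the in-page offset */
}

int main(void)
{
	uint64_t itir = 14 << 2;	/* ps = 14 -> 16KB pages */

	printf("ps=%llu mask=0x%llx\n",
	       (unsigned long long)itir_ps(itir),
	       (unsigned long long)itir_mask(itir));
	/* expected: ps=14 mask=0xffffffffffffc000 */
	return 0;
}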
    14.1 --- a/xen/include/asm-ia64/vmx_pal_vsa.h	Tue Oct 17 14:30:36 2006 -0600
    14.2 +++ b/xen/include/asm-ia64/vmx_pal_vsa.h	Tue Oct 17 15:43:41 2006 -0600
    14.3 @@ -26,10 +26,9 @@
    14.4  /* PAL virtualization services */
    14.5  
    14.6  #ifndef __ASSEMBLY__
    14.7 -extern UINT64 ia64_call_vsa(UINT64 proc,UINT64 arg1, UINT64 arg2,
    14.8 -                   UINT64 arg3, UINT64 arg4, UINT64 arg5,
    14.9 -                   UINT64 arg6, UINT64 arg7);
   14.10 -extern UINT64 __vsa_base;
   14.11 +extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
   14.12 +                         u64 arg4, u64 arg5, u64 arg6, u64 arg7);
   14.13 +extern u64 __vsa_base;
   14.14  #endif  /* __ASSEMBLY__ */
   14.15  
   14.16  #define PAL_VPS_RESUME_NORMAL           0x0000
    15.1 --- a/xen/include/asm-ia64/vmx_phy_mode.h	Tue Oct 17 14:30:36 2006 -0600
    15.2 +++ b/xen/include/asm-ia64/vmx_phy_mode.h	Tue Oct 17 15:43:41 2006 -0600
    15.3 @@ -90,7 +90,7 @@ extern void physical_mode_init(VCPU *);
    15.4  extern void switch_to_physical_rid(VCPU *);
    15.5  extern void switch_to_virtual_rid(VCPU *vcpu);
    15.6  extern void switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr);
    15.7 -extern void stlb_phys_lookup(VCPU *vcpu, UINT64 paddr, UINT64 type);
    15.8 +extern void stlb_phys_lookup(VCPU *vcpu, u64 paddr, u64 type);
    15.9  extern void check_mm_mode_switch (VCPU *vcpu,  IA64_PSR old_psr, IA64_PSR new_psr);
   15.10  extern void prepare_if_physical_mode(VCPU *vcpu);
   15.11  extern void recover_if_physical_mode(VCPU *vcpu);
   15.12 @@ -120,9 +120,4 @@ extern void physical_tlb_miss(VCPU *vcpu
   15.13  #define GUEST_VIRT  1   /* Guest in virtual mode */
   15.14  #define GUEST_PHYS  2   /* Guest in physical mode, requiring emulation */
   15.15  
   15.16 -
   15.17 -
   15.18  #endif /* _PHY_MODE_H_ */
   15.19 -
   15.20 -
   15.21 -
    16.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Tue Oct 17 14:30:36 2006 -0600
    16.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Tue Oct 17 15:43:41 2006 -0600
    16.3 @@ -1,4 +1,4 @@
    16.4 -/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    16.5 +/* -*-  Mode:C; c-basic-offset:8; tab-width:8; indent-tabs-mode:nil -*- */
    16.6  /*
    16.7   * vmx_vcpu.h:
    16.8   * Copyright (c) 2005, Intel Corporation.
    16.9 @@ -23,7 +23,6 @@
   16.10  #ifndef _XEN_IA64_VMX_VCPU_H
   16.11  #define _XEN_IA64_VMX_VCPU_H
   16.12  
   16.13 -
   16.14  #include <xen/sched.h>
   16.15  #include <asm/ia64_int.h>
   16.16  #include <asm/vmx_vpd.h>
   16.17 @@ -33,464 +32,438 @@
   16.18  #include <asm/types.h>
   16.19  #include <asm/vcpu.h>
   16.20  
   16.21 -#define VRN_SHIFT    61
   16.22 -#define VRN0    0x0UL
   16.23 -#define VRN1    0x1UL
   16.24 -#define VRN2    0x2UL
   16.25 -#define VRN3    0x3UL
   16.26 -#define VRN4    0x4UL
   16.27 -#define VRN5    0x5UL
   16.28 -#define VRN6    0x6UL
   16.29 -#define VRN7    0x7UL
   16.30 +#define VRN_SHIFT	61
   16.31 +#define VRN0		0x0UL
   16.32 +#define VRN1		0x1UL
   16.33 +#define VRN2		0x2UL
   16.34 +#define VRN3		0x3UL
   16.35 +#define VRN4		0x4UL
   16.36 +#define VRN5		0x5UL
   16.37 +#define VRN6		0x6UL
   16.38 +#define VRN7		0x7UL
   16.39  // for vlsapic
   16.40 -#define  VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
   16.41 +#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
   16.42  
   16.43  #define VMX(x,y)  ((x)->arch.arch_vmx.y)
   16.44  
   16.45 -
   16.46 -#define VMM_RR_SHIFT    20
   16.47 -#define VMM_RR_MASK     ((1UL<<VMM_RR_SHIFT)-1)
   16.48 +#define VMM_RR_SHIFT	20
   16.49 +#define VMM_RR_MASK	((1UL<<VMM_RR_SHIFT)-1)
   16.50  
   16.51 -extern u64 indirect_reg_igfld_MASK ( int type, int index, u64 value);
   16.52 -extern u64 cr_igfld_mask (int index, u64 value);
   16.53 -extern int check_indirect_reg_rsv_fields ( int type, int index, u64 value );
   16.54 -extern u64 set_isr_ei_ni (VCPU *vcpu);
   16.55 -extern u64 set_isr_for_na_inst(VCPU *vcpu, int op);
   16.56 -
   16.57 +extern u64 indirect_reg_igfld_MASK(int type, int index, u64 value);
   16.58 +extern u64 cr_igfld_mask(int index, u64 value);
   16.59 +extern int check_indirect_reg_rsv_fields(int type, int index, u64 value);
   16.60 +extern u64 set_isr_ei_ni(VCPU * vcpu);
   16.61 +extern u64 set_isr_for_na_inst(VCPU * vcpu, int op);
   16.62  
   16.63  /* next all for VTI domain APIs definition */
   16.64 -extern void vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value);
   16.65 -extern UINT64 vmx_vcpu_sync_mpsr(UINT64 mipsr, UINT64 value);
   16.66 -extern void vmx_vcpu_set_psr_sync_mpsr(VCPU * vcpu, UINT64 value);
   16.67 -extern IA64FAULT vmx_vcpu_cover(VCPU *vcpu);
   16.68 -extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
   16.69 -extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
   16.70 -IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
   16.71 -extern IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
   16.72 -extern IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
   16.73 -extern IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte, UINT64 itir, UINT64 ifa);
   16.74 -extern IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte, UINT64 itir, UINT64 ifa);
   16.75 -extern IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 ps);
   16.76 -extern IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 ps);
   16.77 -extern IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps);
   16.78 -extern IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 vadr);
   16.79 -extern IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 ps);
   16.80 -extern IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 ps);
   16.81 -extern IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
   16.82 -extern u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa);
   16.83 -extern IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
   16.84 -extern IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
   16.85 -extern IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key);
   16.86 -extern IA64FAULT vmx_vcpu_rfi(VCPU *vcpu);
   16.87 -extern UINT64 vmx_vcpu_get_psr(VCPU *vcpu);
   16.88 -extern IA64FAULT vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val);
   16.89 -extern IA64FAULT vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val,int nat);
   16.90 +extern void vmx_vcpu_set_psr(VCPU * vcpu, unsigned long value);
   16.91 +extern u64 vmx_vcpu_sync_mpsr(u64 mipsr, u64 value);
   16.92 +extern void vmx_vcpu_set_psr_sync_mpsr(VCPU * vcpu, u64 value);
   16.93 +extern IA64FAULT vmx_vcpu_cover(VCPU * vcpu);
   16.94 +extern IA64FAULT vmx_vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val);
   16.95 +extern IA64FAULT vmx_vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval);
   16.96 +IA64FAULT vmx_vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val);
   16.97 +extern IA64FAULT vmx_vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
   16.98 +extern IA64FAULT vmx_vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
   16.99 +extern IA64FAULT vmx_vcpu_itr_i(VCPU * vcpu, u64 slot, u64 pte, u64 itir,
  16.100 +                                u64 ifa);
  16.101 +extern IA64FAULT vmx_vcpu_itr_d(VCPU * vcpu, u64 slot, u64 pte, u64 itir,
  16.102 +                                u64 ifa);
  16.103 +extern IA64FAULT vmx_vcpu_ptr_d(VCPU * vcpu, u64 vadr, u64 ps);
  16.104 +extern IA64FAULT vmx_vcpu_ptr_i(VCPU * vcpu, u64 vadr, u64 ps);
  16.105 +extern IA64FAULT vmx_vcpu_ptc_l(VCPU * vcpu, u64 vadr, u64 ps);
  16.106 +extern IA64FAULT vmx_vcpu_ptc_e(VCPU * vcpu, u64 vadr);
  16.107 +extern IA64FAULT vmx_vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 ps);
  16.108 +extern IA64FAULT vmx_vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 ps);
  16.109 +extern IA64FAULT vmx_vcpu_thash(VCPU * vcpu, u64 vadr, u64 * pval);
  16.110 +extern u64 vmx_vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa);
  16.111 +extern IA64FAULT vmx_vcpu_ttag(VCPU * vcpu, u64 vadr, u64 * pval);
  16.112 +extern IA64FAULT vmx_vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr);
  16.113 +extern IA64FAULT vmx_vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key);
  16.114 +extern IA64FAULT vmx_vcpu_rfi(VCPU * vcpu);
  16.115 +extern u64 vmx_vcpu_get_psr(VCPU * vcpu);
  16.116 +extern IA64FAULT vmx_vcpu_get_bgr(VCPU * vcpu, unsigned int reg, u64 * val);
  16.117 +extern IA64FAULT vmx_vcpu_set_bgr(VCPU * vcpu, unsigned int reg, u64 val,
  16.118 +                                  int nat);
  16.119  #if 0
  16.120 -extern IA64FAULT vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val);
  16.121 -extern IA64FAULT vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat);
  16.122 +extern IA64FAULT vmx_vcpu_get_gr(VCPU * vcpu, unsigned reg, u64 * val);
  16.123 +extern IA64FAULT vmx_vcpu_set_gr(VCPU * vcpu, unsigned reg, u64 value, int nat);
  16.124  #endif
  16.125 -extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24);
  16.126 -extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24);
  16.127 -extern IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
  16.128 -extern void vtm_init(VCPU *vcpu);
  16.129 -extern uint64_t vtm_get_itc(VCPU *vcpu);
  16.130 -extern void vtm_set_itc(VCPU *vcpu, uint64_t new_itc);
  16.131 -extern void vtm_set_itv(VCPU *vcpu, uint64_t val);
  16.132 -extern void vtm_set_itm(VCPU *vcpu, uint64_t val);
  16.133 -extern void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm);
  16.134 +extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU * vcpu, u64 imm24);
  16.135 +extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU * vcpu, u64 imm24);
  16.136 +extern IA64FAULT vmx_vcpu_set_psr_l(VCPU * vcpu, u64 val);
  16.137 +extern void vtm_init(VCPU * vcpu);
  16.138 +extern uint64_t vtm_get_itc(VCPU * vcpu);
  16.139 +extern void vtm_set_itc(VCPU * vcpu, uint64_t new_itc);
  16.140 +extern void vtm_set_itv(VCPU * vcpu, uint64_t val);
  16.141 +extern void vtm_set_itm(VCPU * vcpu, uint64_t val);
  16.142 +extern void vtm_interruption_update(VCPU * vcpu, vtime_t * vtm);
  16.143  //extern void vtm_domain_out(VCPU *vcpu);
  16.144  //extern void vtm_domain_in(VCPU *vcpu);
  16.145 -extern void vlsapic_reset(VCPU *vcpu);
  16.146 -extern int vmx_check_pending_irq(VCPU *vcpu);
  16.147 -extern void guest_write_eoi(VCPU *vcpu);
  16.148 -extern int is_unmasked_irq(VCPU *vcpu);
  16.149 -extern uint64_t guest_read_vivr(VCPU *vcpu);
  16.150 -extern void vmx_inject_vhpi(VCPU *vcpu, u8 vec);
  16.151 -extern int vmx_vcpu_pend_interrupt(VCPU *vcpu, uint8_t vector);
  16.152 -extern struct virtual_platform_def *vmx_vcpu_get_plat(VCPU *vcpu);
  16.153 -extern void memread_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
  16.154 -extern void memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
  16.155 -extern void memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
  16.156 -extern void memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
  16.157 -extern void vcpu_load_kernel_regs(VCPU *vcpu);
  16.158 -extern IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu);
  16.159 -extern IA64FAULT vmx_vcpu_decrement_iip(VCPU *vcpu);
  16.160 -extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *);
  16.161 +extern void vlsapic_reset(VCPU * vcpu);
  16.162 +extern int vmx_check_pending_irq(VCPU * vcpu);
  16.163 +extern void guest_write_eoi(VCPU * vcpu);
  16.164 +extern int is_unmasked_irq(VCPU * vcpu);
  16.165 +extern uint64_t guest_read_vivr(VCPU * vcpu);
  16.166 +extern void vmx_inject_vhpi(VCPU * vcpu, u8 vec);
  16.167 +extern int vmx_vcpu_pend_interrupt(VCPU * vcpu, uint8_t vector);
  16.168 +extern struct virtual_platform_def *vmx_vcpu_get_plat(VCPU * vcpu);
  16.169 +extern void memread_p(VCPU * vcpu, u64 * src, u64 * dest, size_t s);
  16.170 +extern void memread_v(VCPU * vcpu, thash_data_t * vtlb, u64 * src, u64 * dest,
  16.171 +                      size_t s);
  16.172 +extern void memwrite_v(VCPU * vcpu, thash_data_t * vtlb, u64 * src, u64 * dest,
  16.173 +                       size_t s);
  16.174 +extern void memwrite_p(VCPU * vcpu, u64 * src, u64 * dest, size_t s);
  16.175 +extern void vcpu_load_kernel_regs(VCPU * vcpu);
  16.176 +extern IA64FAULT vmx_vcpu_increment_iip(VCPU * vcpu);
  16.177 +extern IA64FAULT vmx_vcpu_decrement_iip(VCPU * vcpu);
  16.178 +extern void vmx_switch_rr7(unsigned long, shared_info_t *, void *, void *,
  16.179 +                           void *);
  16.180  
  16.181 -extern void dtlb_fault (VCPU *vcpu, u64 vadr);
  16.182 -extern void nested_dtlb (VCPU *vcpu);
  16.183 -extern void alt_dtlb (VCPU *vcpu, u64 vadr);
  16.184 -extern void dvhpt_fault (VCPU *vcpu, u64 vadr);
  16.185 -extern void dnat_page_consumption (VCPU *vcpu, uint64_t vadr);
  16.186 -extern void data_page_not_present(VCPU *vcpu, u64 vadr);
  16.187 -extern void inst_page_not_present(VCPU *vcpu, u64 vadr);
  16.188 -extern void data_access_rights(VCPU *vcpu, u64 vadr);
  16.189 +extern void dtlb_fault(VCPU * vcpu, u64 vadr);
  16.190 +extern void nested_dtlb(VCPU * vcpu);
  16.191 +extern void alt_dtlb(VCPU * vcpu, u64 vadr);
  16.192 +extern void dvhpt_fault(VCPU * vcpu, u64 vadr);
  16.193 +extern void dnat_page_consumption(VCPU * vcpu, uint64_t vadr);
  16.194 +extern void data_page_not_present(VCPU * vcpu, u64 vadr);
  16.195 +extern void inst_page_not_present(VCPU * vcpu, u64 vadr);
  16.196 +extern void data_access_rights(VCPU * vcpu, u64 vadr);
  16.197  
  16.198  /**************************************************************************
  16.199   VCPU control register access routines
  16.200  **************************************************************************/
  16.201  
  16.202 -static inline
  16.203 -IA64FAULT vmx_vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
  16.204 +static inline IA64FAULT vmx_vcpu_get_dcr(VCPU * vcpu, u64 * pval)
  16.205 +{
  16.206 +	*pval = VCPU(vcpu, dcr);
  16.207 +	return IA64_NO_FAULT;
  16.208 +}
  16.209 +
  16.210 +static inline IA64FAULT vmx_vcpu_get_itm(VCPU * vcpu, u64 * pval)
  16.211 +{
  16.212 +	*pval = VCPU(vcpu, itm);
  16.213 +	return IA64_NO_FAULT;
  16.214 +}
  16.215 +
  16.216 +static inline IA64FAULT vmx_vcpu_get_iva(VCPU * vcpu, u64 * pval)
  16.217  {
  16.218 -    *pval = VCPU(vcpu,dcr);
  16.219 -    return (IA64_NO_FAULT);
  16.220 +	*pval = VCPU(vcpu, iva);
  16.221 +	return IA64_NO_FAULT;
  16.222 +}
  16.223 +
  16.224 +static inline IA64FAULT vmx_vcpu_get_pta(VCPU * vcpu, u64 * pval)
  16.225 +{
  16.226 +	*pval = VCPU(vcpu, pta);
  16.227 +	return IA64_NO_FAULT;
  16.228 +}
  16.229 +
  16.230 +static inline IA64FAULT vmx_vcpu_get_lid(VCPU * vcpu, u64 * pval)
  16.231 +{
  16.232 +	*pval = VCPU(vcpu, lid);
  16.233 +	return IA64_NO_FAULT;
  16.234 +}
  16.235 +
  16.236 +static inline IA64FAULT vmx_vcpu_get_ivr(VCPU * vcpu, u64 * pval)
  16.237 +{
  16.238 +	*pval = guest_read_vivr(vcpu);
  16.239 +	return IA64_NO_FAULT;
  16.240  }
  16.241  
  16.242 -static inline
  16.243 -IA64FAULT vmx_vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
  16.244 +static inline IA64FAULT vmx_vcpu_get_tpr(VCPU * vcpu, u64 * pval)
  16.245 +{
  16.246 +	*pval = VCPU(vcpu, tpr);
  16.247 +	return IA64_NO_FAULT;
  16.248 +}
  16.249 +
  16.250 +static inline IA64FAULT vmx_vcpu_get_eoi(VCPU * vcpu, u64 * pval)
  16.251  {
  16.252 -    *pval = VCPU(vcpu,itm);
  16.253 -    return (IA64_NO_FAULT);
  16.254 +	*pval = 0L;		// reads of eoi always return 0
  16.255 +	return IA64_NO_FAULT;
  16.256 +}
  16.257 +
  16.258 +static inline IA64FAULT vmx_vcpu_get_irr0(VCPU * vcpu, u64 * pval)
  16.259 +{
  16.260 +	*pval = VCPU(vcpu, irr[0]);
  16.261 +	return IA64_NO_FAULT;
  16.262  }
  16.263  
  16.264 -static inline
  16.265 -IA64FAULT vmx_vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
  16.266 +static inline IA64FAULT vmx_vcpu_get_irr1(VCPU * vcpu, u64 * pval)
  16.267  {
  16.268 -    *pval = VCPU(vcpu,iva);
  16.269 -    return (IA64_NO_FAULT);
  16.270 +	*pval = VCPU(vcpu, irr[1]);
  16.271 +	return IA64_NO_FAULT;
  16.272 +}
  16.273 +
  16.274 +static inline IA64FAULT vmx_vcpu_get_irr2(VCPU * vcpu, u64 * pval)
  16.275 +{
  16.276 +	*pval = VCPU(vcpu, irr[2]);
  16.277 +	return IA64_NO_FAULT;
  16.278  }
  16.279 -static inline
  16.280 -IA64FAULT vmx_vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
  16.281 +
  16.282 +static inline IA64FAULT vmx_vcpu_get_irr3(VCPU * vcpu, u64 * pval)
  16.283 +{
  16.284 +	*pval = VCPU(vcpu, irr[3]);
  16.285 +	return IA64_NO_FAULT;
  16.286 +}
  16.287 +
  16.288 +static inline IA64FAULT vmx_vcpu_get_itv(VCPU * vcpu, u64 * pval)
  16.289  {
  16.290 -    *pval = VCPU(vcpu,pta);
  16.291 -    return (IA64_NO_FAULT);
  16.292 +	*pval = VCPU(vcpu, itv);
  16.293 +	return IA64_NO_FAULT;
  16.294 +}
  16.295 +
  16.296 +static inline IA64FAULT vmx_vcpu_get_pmv(VCPU * vcpu, u64 * pval)
  16.297 +{
  16.298 +	*pval = VCPU(vcpu, pmv);
  16.299 +	return IA64_NO_FAULT;
  16.300  }
  16.301  
  16.302 -static inline
  16.303 -IA64FAULT vmx_vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
  16.304 -{
  16.305 -    *pval = VCPU(vcpu,lid);
  16.306 -    return (IA64_NO_FAULT);
  16.307 -}
  16.308 -static inline
  16.309 -IA64FAULT vmx_vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
  16.310 +static inline IA64FAULT vmx_vcpu_get_cmcv(VCPU * vcpu, u64 * pval)
  16.311  {
  16.312 -    *pval = guest_read_vivr(vcpu);
  16.313 -    return (IA64_NO_FAULT);
  16.314 +	*pval = VCPU(vcpu, cmcv);
  16.315 +	return IA64_NO_FAULT;
  16.316  }
  16.317 -static inline
  16.318 -IA64FAULT vmx_vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
  16.319 -{
  16.320 -    *pval = VCPU(vcpu,tpr);
  16.321 -    return (IA64_NO_FAULT);
  16.322 -}
  16.323 -static inline
  16.324 -IA64FAULT vmx_vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
  16.325 +
  16.326 +static inline IA64FAULT vmx_vcpu_get_lrr0(VCPU * vcpu, u64 * pval)
  16.327  {
  16.328 -    *pval = 0L;  // reads of eoi always return 0
  16.329 -    return (IA64_NO_FAULT);
  16.330 +	*pval = VCPU(vcpu, lrr0);
  16.331 +	return IA64_NO_FAULT;
  16.332  }
  16.333 -static inline
  16.334 -IA64FAULT vmx_vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
  16.335 -{
  16.336 -    *pval = VCPU(vcpu,irr[0]);
  16.337 -    return (IA64_NO_FAULT);
  16.338 -}
  16.339 -static inline
  16.340 -IA64FAULT vmx_vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
  16.341 +
  16.342 +static inline IA64FAULT vmx_vcpu_get_lrr1(VCPU * vcpu, u64 * pval)
  16.343  {
  16.344 -    *pval = VCPU(vcpu,irr[1]);
  16.345 -    return (IA64_NO_FAULT);
  16.346 +	*pval = VCPU(vcpu, lrr1);
  16.347 +	return IA64_NO_FAULT;
  16.348  }
  16.349 -static inline
  16.350 -IA64FAULT vmx_vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
  16.351 -{
  16.352 -    *pval = VCPU(vcpu,irr[2]);
  16.353 -    return (IA64_NO_FAULT);
  16.354 -}
  16.355 -static inline
  16.356 -IA64FAULT vmx_vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
  16.357 +
  16.358 +static inline IA64FAULT vmx_vcpu_set_dcr(VCPU * vcpu, u64 val)
  16.359  {
  16.360 -    *pval = VCPU(vcpu,irr[3]);
  16.361 -    return (IA64_NO_FAULT);
  16.362 -}
  16.363 -static inline
  16.364 -IA64FAULT vmx_vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
  16.365 -{
  16.366 -    *pval = VCPU(vcpu,itv);
  16.367 -    return (IA64_NO_FAULT);
  16.368 +	u64 mdcr, mask;
  16.369 +	VCPU(vcpu, dcr) = val;
  16.370 +	/* All vDCR bits will go to mDCR, except for be/pp/dm bits */
  16.371 +	mdcr = ia64_get_dcr();
  16.372 +	/* Machine dcr.dm masked to handle guest ld.s on tr mapped page */
  16.373 +	mask = IA64_DCR_BE | IA64_DCR_PP | IA64_DCR_DM;
  16.374 +	mdcr = (mdcr & mask) | (val & (~mask));
  16.375 +	ia64_set_dcr(mdcr);
  16.376 +	VMX(vcpu, mdcr) = mdcr;
  16.377 +	return IA64_NO_FAULT;
  16.378  }
  16.379 -static inline
  16.380 -IA64FAULT vmx_vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
  16.381 -{
  16.382 -    *pval = VCPU(vcpu,pmv);
  16.383 -    return (IA64_NO_FAULT);
  16.384 -}
  16.385 -static inline
  16.386 -IA64FAULT vmx_vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
  16.387 -{
  16.388 -    *pval = VCPU(vcpu,cmcv);
  16.389 -    return (IA64_NO_FAULT);
  16.390 -}
  16.391 -static inline
  16.392 -IA64FAULT vmx_vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
  16.393 +
  16.394 +static inline IA64FAULT vmx_vcpu_set_itm(VCPU * vcpu, u64 val)
  16.395  {
  16.396 -    *pval = VCPU(vcpu,lrr0);
  16.397 -    return (IA64_NO_FAULT);
  16.398 -}
  16.399 -static inline
  16.400 -IA64FAULT vmx_vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
  16.401 -{
  16.402 -    *pval = VCPU(vcpu,lrr1);
  16.403 -    return (IA64_NO_FAULT);
  16.404 +	vtm_set_itm(vcpu, val);
  16.405 +	return IA64_NO_FAULT;
  16.406  }
  16.407 -static inline
  16.408 -IA64FAULT
  16.409 -vmx_vcpu_set_dcr(VCPU *vcpu, u64 val)
  16.410 +
  16.411 +static inline IA64FAULT vmx_vcpu_set_iva(VCPU * vcpu, u64 val)
  16.412  {
  16.413 -    u64 mdcr, mask;
  16.414 -    VCPU(vcpu,dcr)=val;
  16.415 -    /* All vDCR bits will go to mDCR, except for be/pp/dm bits */
  16.416 -    mdcr = ia64_get_dcr();
  16.417 -    /* Machine dcr.dm masked to handle guest ld.s on tr mapped page */
  16.418 -    mask = IA64_DCR_BE | IA64_DCR_PP | IA64_DCR_DM;
  16.419 -    mdcr = ( mdcr & mask ) | ( val & (~mask) );
  16.420 -    ia64_set_dcr( mdcr);
  16.421 -    VMX(vcpu, mdcr) = mdcr;
  16.422 -    return IA64_NO_FAULT;
  16.423 +	VCPU(vcpu, iva) = val;
  16.424 +	return IA64_NO_FAULT;
  16.425  }
  16.426  
  16.427 -static inline
  16.428 -IA64FAULT
  16.429 -vmx_vcpu_set_itm(VCPU *vcpu, u64 val)
  16.430 +static inline IA64FAULT vmx_vcpu_set_pta(VCPU * vcpu, u64 val)
  16.431  {
  16.432 -    vtm_set_itm(vcpu, val);
  16.433 -    return IA64_NO_FAULT;
  16.434 -}
  16.435 -static inline
  16.436 -IA64FAULT
  16.437 -vmx_vcpu_set_iva(VCPU *vcpu, u64 val)
  16.438 -{
  16.439 -    VCPU(vcpu,iva)=val;
  16.440 -    return IA64_NO_FAULT;
  16.441 +	VCPU(vcpu, pta) = val;
  16.442 +	return IA64_NO_FAULT;
  16.443  }
  16.444  
  16.445 -static inline
  16.446 -IA64FAULT
  16.447 -vmx_vcpu_set_pta(VCPU *vcpu, u64 val)
  16.448 -{
  16.449 -    VCPU(vcpu,pta)=val;
  16.450 -    return IA64_NO_FAULT;
  16.451 -}
  16.452 -
  16.453 -static inline
  16.454 -IA64FAULT
  16.455 -vmx_vcpu_set_lid(VCPU *vcpu, u64 val)
  16.456 +static inline IA64FAULT vmx_vcpu_set_lid(VCPU * vcpu, u64 val)
  16.457  {
  16.458 -    VCPU(vcpu,lid)=val;
  16.459 -    return IA64_NO_FAULT;
  16.460 +	VCPU(vcpu, lid) = val;
  16.461 +	return IA64_NO_FAULT;
  16.462  }
  16.463 -extern IA64FAULT vmx_vcpu_set_tpr(VCPU *vcpu, u64 val);
  16.464 +extern IA64FAULT vmx_vcpu_set_tpr(VCPU * vcpu, u64 val);
  16.465  
  16.466 -static inline
  16.467 -IA64FAULT
  16.468 -vmx_vcpu_set_eoi(VCPU *vcpu, u64 val)
  16.469 +static inline IA64FAULT vmx_vcpu_set_eoi(VCPU * vcpu, u64 val)
  16.470  {
  16.471 -    guest_write_eoi(vcpu);
  16.472 -    return IA64_NO_FAULT;
  16.473 +	guest_write_eoi(vcpu);
  16.474 +	return IA64_NO_FAULT;
  16.475  }
  16.476  
  16.477 -static inline
  16.478 -IA64FAULT
  16.479 -vmx_vcpu_set_itv(VCPU *vcpu, u64 val)
  16.480 +static inline IA64FAULT vmx_vcpu_set_itv(VCPU * vcpu, u64 val)
  16.481  {
  16.482  
  16.483 -    vtm_set_itv(vcpu, val);
  16.484 -    return IA64_NO_FAULT;
  16.485 +	vtm_set_itv(vcpu, val);
  16.486 +	return IA64_NO_FAULT;
  16.487  }
  16.488 -static inline
  16.489 -IA64FAULT
  16.490 -vmx_vcpu_set_pmv(VCPU *vcpu, u64 val)
  16.491 -{
  16.492 -    VCPU(vcpu,pmv)=val;
  16.493 -    return IA64_NO_FAULT;
  16.494 -}
  16.495 -static inline
  16.496 -IA64FAULT
  16.497 -vmx_vcpu_set_cmcv(VCPU *vcpu, u64 val)
  16.498 +
  16.499 +static inline IA64FAULT vmx_vcpu_set_pmv(VCPU * vcpu, u64 val)
  16.500  {
  16.501 -    VCPU(vcpu,cmcv)=val;
  16.502 -    return IA64_NO_FAULT;
  16.503 -}
  16.504 -static inline
  16.505 -IA64FAULT
  16.506 -vmx_vcpu_set_lrr0(VCPU *vcpu, u64 val)
  16.507 -{
  16.508 -    VCPU(vcpu,lrr0)=val;
  16.509 -    return IA64_NO_FAULT;
  16.510 -}
  16.511 -static inline
  16.512 -IA64FAULT
  16.513 -vmx_vcpu_set_lrr1(VCPU *vcpu, u64 val)
  16.514 -{
  16.515 -    VCPU(vcpu,lrr1)=val;
  16.516 -    return IA64_NO_FAULT;
  16.517 +	VCPU(vcpu, pmv) = val;
  16.518 +	return IA64_NO_FAULT;
  16.519  }
  16.520  
  16.521 +static inline IA64FAULT vmx_vcpu_set_cmcv(VCPU * vcpu, u64 val)
  16.522 +{
  16.523 +	VCPU(vcpu, cmcv) = val;
  16.524 +	return IA64_NO_FAULT;
  16.525 +}
  16.526  
  16.527 +static inline IA64FAULT vmx_vcpu_set_lrr0(VCPU * vcpu, u64 val)
  16.528 +{
  16.529 +	VCPU(vcpu, lrr0) = val;
  16.530 +	return IA64_NO_FAULT;
  16.531 +}
  16.532  
  16.533 +static inline IA64FAULT vmx_vcpu_set_lrr1(VCPU * vcpu, u64 val)
  16.534 +{
  16.535 +	VCPU(vcpu, lrr1) = val;
  16.536 +	return IA64_NO_FAULT;
  16.537 +}
  16.538  
  16.539  /**************************************************************************
  16.540   VCPU privileged application register access routines
  16.541  **************************************************************************/
  16.542 -static inline
  16.543 -IA64FAULT vmx_vcpu_set_itc(VCPU *vcpu, UINT64 val)
  16.544 +static inline IA64FAULT vmx_vcpu_set_itc(VCPU * vcpu, u64 val)
  16.545  {
  16.546 -    vtm_set_itc(vcpu, val);
  16.547 -    return  IA64_NO_FAULT;
  16.548 +	vtm_set_itc(vcpu, val);
  16.549 +	return IA64_NO_FAULT;
  16.550  }
  16.551 -static inline
  16.552 -IA64FAULT vmx_vcpu_get_itc(VCPU *vcpu,UINT64 *val)
  16.553 +
  16.554 +static inline IA64FAULT vmx_vcpu_get_itc(VCPU * vcpu, u64 * val)
  16.555  {
  16.556 -    *val = vtm_get_itc(vcpu);
  16.557 -    return  IA64_NO_FAULT;
  16.558 +	*val = vtm_get_itc(vcpu);
  16.559 +	return IA64_NO_FAULT;
  16.560  }
  16.561 +
  16.562  /*
  16.563  static inline
  16.564 -IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  16.565 +IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, u64 reg, u64 *pval)
  16.566  {
  16.567      *pval = VMX(vcpu,vrr[reg>>61]);
  16.568 -    return (IA64_NO_FAULT);
  16.569 +    return IA64_NO_FAULT;
  16.570  }
  16.571   */
  16.572  /**************************************************************************
  16.573   VCPU debug breakpoint register access routines
  16.574  **************************************************************************/
  16.575  
  16.576 -static inline
  16.577 -IA64FAULT vmx_vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  16.578 +static inline IA64FAULT vmx_vcpu_get_cpuid(VCPU * vcpu, u64 reg, u64 * pval)
  16.579  {
  16.580 -    // TODO: unimplemented DBRs return a reserved register fault
  16.581 -    // TODO: Should set Logical CPU state, not just physical
  16.582 -    if(reg > 4){
  16.583 -        panic_domain(vcpu_regs(vcpu),"there are only five cpuid registers");
  16.584 -    }
  16.585 -    *pval=VCPU(vcpu,vcpuid[reg]);
  16.586 -    return (IA64_NO_FAULT);
  16.587 +	// TODO: unimplemented CPUID registers should raise a reserved register/field fault
  16.588 +	// TODO: Should set Logical CPU state, not just physical
  16.589 +	if (reg > 4) {
  16.590 +		panic_domain(vcpu_regs(vcpu),
  16.591 +			     "there are only five cpuid registers");
  16.592 +	}
  16.593 +	*pval = VCPU(vcpu, vcpuid[reg]);
  16.594 +	return IA64_NO_FAULT;
  16.595 +}
  16.596 +
  16.597 +static inline IA64FAULT vmx_vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
  16.598 +{
  16.599 +	// TODO: unimplemented DBRs return a reserved register fault
  16.600 +	// TODO: Should set Logical CPU state, not just physical
  16.601 +	ia64_set_dbr(reg, val);
  16.602 +	return IA64_NO_FAULT;
  16.603  }
  16.604  
  16.605 -
  16.606 -static inline
  16.607 -IA64FAULT vmx_vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
  16.608 +static inline IA64FAULT vmx_vcpu_set_ibr(VCPU * vcpu, u64 reg, u64 val)
  16.609  {
  16.610 -    // TODO: unimplemented DBRs return a reserved register fault
  16.611 -    // TODO: Should set Logical CPU state, not just physical
  16.612 -    ia64_set_dbr(reg,val);
  16.613 -    return (IA64_NO_FAULT);
  16.614 +	// TODO: unimplemented IBRs return a reserved register fault
  16.615 +	// TODO: Should set Logical CPU state, not just physical
  16.616 +	ia64_set_ibr(reg, val);
  16.617 +	return IA64_NO_FAULT;
  16.618  }
  16.619 -static inline
  16.620 -IA64FAULT vmx_vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
  16.621 +
  16.622 +static inline IA64FAULT vmx_vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 * pval)
  16.623  {
  16.624 -    // TODO: unimplemented IBRs return a reserved register fault
  16.625 -    // TODO: Should set Logical CPU state, not just physical
  16.626 -    ia64_set_ibr(reg,val);
  16.627 -    return (IA64_NO_FAULT);
  16.628 +	// TODO: unimplemented DBRs return a reserved register fault
  16.629 +	u64 val = ia64_get_dbr(reg);
  16.630 +	*pval = val;
  16.631 +	return IA64_NO_FAULT;
  16.632  }
  16.633 -static inline
  16.634 -IA64FAULT vmx_vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  16.635 +
  16.636 +static inline IA64FAULT vmx_vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 * pval)
  16.637  {
  16.638 -    // TODO: unimplemented DBRs return a reserved register fault
  16.639 -    UINT64 val = ia64_get_dbr(reg);
  16.640 -    *pval = val;
  16.641 -    return (IA64_NO_FAULT);
  16.642 -}
  16.643 -static inline
  16.644 -IA64FAULT vmx_vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  16.645 -{
  16.646 -    // TODO: unimplemented IBRs return a reserved register fault
  16.647 -    UINT64 val = ia64_get_ibr(reg);
  16.648 -    *pval = val;
  16.649 -    return (IA64_NO_FAULT);
  16.650 +	// TODO: unimplemented IBRs return a reserved register fault
  16.651 +	u64 val = ia64_get_ibr(reg);
  16.652 +	*pval = val;
  16.653 +	return IA64_NO_FAULT;
  16.654  }
  16.655  
  16.656  /**************************************************************************
  16.657   VCPU performance monitor register access routines
  16.658  **************************************************************************/
  16.659 -static inline
  16.660 -IA64FAULT vmx_vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
  16.661 +static inline IA64FAULT vmx_vcpu_set_pmc(VCPU * vcpu, u64 reg, u64 val)
  16.662  {
  16.663 -    // TODO: Should set Logical CPU state, not just physical
  16.664 -    // NOTE: Writes to unimplemented PMC registers are discarded
  16.665 -    ia64_set_pmc(reg,val);
  16.666 -    return (IA64_NO_FAULT);
  16.667 +	// TODO: Should set Logical CPU state, not just physical
  16.668 +	// NOTE: Writes to unimplemented PMC registers are discarded
  16.669 +	ia64_set_pmc(reg, val);
  16.670 +	return IA64_NO_FAULT;
  16.671  }
  16.672 -static inline
  16.673 -IA64FAULT vmx_vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
  16.674 +
  16.675 +static inline IA64FAULT vmx_vcpu_set_pmd(VCPU * vcpu, u64 reg, u64 val)
  16.676  {
  16.677 -    // TODO: Should set Logical CPU state, not just physical
  16.678 -    // NOTE: Writes to unimplemented PMD registers are discarded
  16.679 -    ia64_set_pmd(reg,val);
  16.680 -    return (IA64_NO_FAULT);
  16.681 +	// TODO: Should set Logical CPU state, not just physical
  16.682 +	// NOTE: Writes to unimplemented PMD registers are discarded
  16.683 +	ia64_set_pmd(reg, val);
  16.684 +	return IA64_NO_FAULT;
  16.685  }
  16.686 -static inline
  16.687 -IA64FAULT vmx_vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  16.688 +
  16.689 +static inline IA64FAULT vmx_vcpu_get_pmc(VCPU * vcpu, u64 reg, u64 * pval)
  16.690  {
  16.691 -    // NOTE: Reads from unimplemented PMC registers return zero
  16.692 -    UINT64 val = (UINT64)ia64_get_pmc(reg);
  16.693 -    *pval = val;
  16.694 -    return (IA64_NO_FAULT);
  16.695 +	// NOTE: Reads from unimplemented PMC registers return zero
  16.696 +	u64 val = (u64) ia64_get_pmc(reg);
  16.697 +	*pval = val;
  16.698 +	return IA64_NO_FAULT;
  16.699  }
  16.700 -static inline
  16.701 -IA64FAULT vmx_vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  16.702 +
  16.703 +static inline IA64FAULT vmx_vcpu_get_pmd(VCPU * vcpu, u64 reg, u64 * pval)
  16.704  {
  16.705 -    // NOTE: Reads from unimplemented PMD registers return zero
  16.706 -    UINT64 val = (UINT64)ia64_get_pmd(reg);
  16.707 -    *pval = val;
  16.708 -    return (IA64_NO_FAULT);
  16.709 +	// NOTE: Reads from unimplemented PMD registers return zero
  16.710 +	u64 val = (u64) ia64_get_pmd(reg);
  16.711 +	*pval = val;
  16.712 +	return IA64_NO_FAULT;
  16.713  }
  16.714  
  16.715  /**************************************************************************
  16.716   VCPU banked general register access routines
  16.717  **************************************************************************/
  16.718  #if 0
  16.719 -static inline
  16.720 -IA64FAULT vmx_vcpu_bsw0(VCPU *vcpu)
  16.721 +static inline IA64FAULT vmx_vcpu_bsw0(VCPU * vcpu)
  16.722  {
  16.723  
  16.724 -    VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
  16.725 -    return (IA64_NO_FAULT);
  16.726 +	VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
  16.727 +	return IA64_NO_FAULT;
  16.728  }
  16.729 -static inline
  16.730 -IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
  16.731 +
  16.732 +static inline IA64FAULT vmx_vcpu_bsw1(VCPU * vcpu)
  16.733  {
  16.734  
  16.735 -    VCPU(vcpu,vpsr) |= IA64_PSR_BN;
  16.736 -    return (IA64_NO_FAULT);
  16.737 +	VCPU(vcpu, vpsr) |= IA64_PSR_BN;
  16.738 +	return IA64_NO_FAULT;
  16.739  }
  16.740  #endif
  16.741  #if 0
  16.742  /* Another hash performance algorithm */
  16.743  #define redistribute_rid(rid)	(((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
  16.744  #endif
  16.745 -static inline unsigned long
  16.746 -vrrtomrr(VCPU *v, unsigned long val)
  16.747 +static inline unsigned long vrrtomrr(VCPU * v, unsigned long val)
  16.748  {
  16.749 -    ia64_rr rr;
  16.750 +	ia64_rr rr;
  16.751  
  16.752 -    rr.rrval=val;
  16.753 -    rr.rid = rr.rid + v->arch.starting_rid;
  16.754 -    if (rr.ps > PAGE_SHIFT)
  16.755 -        rr.ps = PAGE_SHIFT;
  16.756 -    rr.ve = 1;
  16.757 -    return  vmMangleRID(rr.rrval);
  16.758 +	rr.rrval = val;
  16.759 +	rr.rid = rr.rid + v->arch.starting_rid;
  16.760 +	if (rr.ps > PAGE_SHIFT)
  16.761 +		rr.ps = PAGE_SHIFT;
  16.762 +	rr.ve = 1;
  16.763 +	return vmMangleRID(rr.rrval);
  16.764  /* Disable this rid allocation algorithm for now */
  16.765  #if 0
  16.766 -    rid=(((u64)vcpu->domain->domain_id)<<DOMAIN_RID_SHIFT) + rr.rid;
  16.767 -    rr.rid = redistribute_rid(rid);
  16.768 -#endif 
  16.769 +	rid = (((u64) vcpu->domain->domain_id) << DOMAIN_RID_SHIFT) + rr.rid;
  16.770 +	rr.rid = redistribute_rid(rid);
  16.771 +#endif
  16.772  
  16.773  }
  16.774 -static inline thash_cb_t *
  16.775 -vmx_vcpu_get_vtlb(VCPU *vcpu)
  16.776 +static inline thash_cb_t *vmx_vcpu_get_vtlb(VCPU * vcpu)
  16.777  {
  16.778 -    return &vcpu->arch.vtlb;
  16.779 +	return &vcpu->arch.vtlb;
  16.780  }
  16.781  
  16.782 -static inline thash_cb_t *
  16.783 -vcpu_get_vhpt(VCPU *vcpu)
  16.784 +static inline thash_cb_t *vcpu_get_vhpt(VCPU * vcpu)
  16.785  {
  16.786 -    return &vcpu->arch.vhpt;
  16.787 +	return &vcpu->arch.vhpt;
  16.788  }
  16.789  
  16.790  #endif