ia64/xen-unstable

changeset 6469:10b1d30d3f66

Transform the double mapping into a single mapping for VTI domains.
After this change I think it is possible to merge the ivt.S files.
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author djm@kirby.fc.hp.com
date Thu Sep 08 09:18:40 2005 -0600 (2005-09-08)
parents 0c1f966af47e
children b2f4823b6ff0
files xen/arch/ia64/linux-xen/efi.c xen/arch/ia64/linux-xen/unaligned.c xen/arch/ia64/vmx/mm.c xen/arch/ia64/vmx/vlsapic.c xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vmx_entry.S xen/arch/ia64/vmx/vmx_irq_ia64.c xen/arch/ia64/vmx/vmx_ivt.S xen/arch/ia64/vmx/vmx_minstate.h xen/arch/ia64/vmx/vmx_phy_mode.c xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/vmx/vmx_vcpu.c xen/arch/ia64/vmx/vtlb.c xen/arch/ia64/xen/hyperprivop.S xen/arch/ia64/xen/regionreg.c xen/arch/ia64/xen/vcpu.c xen/include/asm-ia64/mm.h xen/include/asm-ia64/regionreg.h xen/include/asm-ia64/vmmu.h xen/include/asm-ia64/vmx.h xen/include/asm-ia64/vmx_vcpu.h xen/include/public/arch-ia64.h
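For orientation, below is a minimal, standalone C sketch of the region-register translation this changeset moves to. It is not part of the patch: the flat rrval_t encoding, the mask/shift constants, and main() are illustrative assumptions only; the byte swap in vmMangleRID, the starting_rid offset and ve=1 in vmx_vrrtomrr, and the 0x138/0x238/.../0x760 per-region seed values are taken from the hunks that follow.

/*
 * Illustrative sketch only -- NOT part of the changeset.  It mirrors the
 * new vmx_vrrtomrr() logic from xen/include/asm-ia64/vmx_vcpu.h and the
 * byte swap in vmMangleRID() from xen/include/asm-ia64/regionreg.h, with
 * simplified stand-ins for the Xen types so it compiles on its own.
 */
#include <stdint.h>
#include <stdio.h>

/* Simplified view of the ia64 region-register format: ve (VHPT enable) in
 * bit 0, page size in bits 2..7, rid in bits 8..31.  The real Xen
 * definition is a bitfield union; this flat encoding is an assumption. */
typedef uint64_t rrval_t;

#define RR_VE        0x1UL
#define RR_RID_SHIFT 8
#define RR_RID_MASK  0xffffffUL

/* After the patch, vmMangleRID() swaps bytes 1 and 2 of the value (it used
 * to swap bytes 1 and 3), i.e. rid bits 8..15 with rid bits 16..23 on a
 * little-endian host such as ia64. */
static rrval_t vm_mangle_rid(rrval_t v)
{
    uint8_t *b = (uint8_t *)&v;
    uint8_t tmp = b[1];
    b[1] = b[2];
    b[2] = tmp;
    return v;
}

/* New single-mapping vmx_vrrtomrr(): offset the guest rid by the domain's
 * starting_rid, force ve=1, and mangle the result -- the old
 * redistribute_rid() hashing is no longer applied. */
static rrval_t vmx_vrrtomrr(uint64_t starting_rid, rrval_t guest_rr)
{
    uint64_t rid = ((guest_rr >> RR_RID_SHIFT) & RR_RID_MASK) + starting_rid;
    rrval_t mrr = (guest_rr & ~(RR_RID_MASK << RR_RID_SHIFT))
                | ((rid & RR_RID_MASK) << RR_RID_SHIFT)
                | RR_VE;
    return vm_mangle_rid(mrr);
}

int main(void)
{
    /* vmx_init_all_rr() now seeds each virtual rr with its region number
     * folded into the rid field (0x138, 0x238, ... instead of a flat 0x38,
     * and 0x660/0x760 for regions 6 and 7). */
    for (int vrn = 0; vrn <= 7; vrn++) {
        rrval_t vrr = ((uint64_t)vrn << RR_RID_SHIFT) | (vrn < 6 ? 0x38 : 0x60);
        printf("vrn%d: vrr=%#lx -> mrr=%#lx\n",
               vrn, (unsigned long)vrr,
               (unsigned long)vmx_vrrtomrr(0x1000, vrr));
    }
    return 0;
}

With a single mapping, every guest region register is translated this way and loaded straight into the machine region registers on vcpu switch (vmx_load_all_rr and the new vmx_switch_rr7 path), while the old rr5/rr6/rr7 double-mapping juggling is compiled out under XEN_DBL_MAPPING.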
line diff
     1.1 --- a/xen/arch/ia64/linux-xen/efi.c	Thu Sep 08 07:24:08 2005 -0600
     1.2 +++ b/xen/arch/ia64/linux-xen/efi.c	Thu Sep 08 09:18:40 2005 -0600
     1.3 @@ -523,11 +523,21 @@ efi_get_pal_addr (void)
     1.4  	return NULL;
     1.5  }
     1.6  
     1.7 +
     1.8 +#ifdef XEN
     1.9 +void *pal_vaddr;
    1.10 +#endif
    1.11 +
    1.12  void
    1.13  efi_map_pal_code (void)
    1.14  {
    1.15 +#ifdef XEN
    1.16 +	u64 psr;
    1.17 +	pal_vaddr = efi_get_pal_addr ();
    1.18 +#else
    1.19  	void *pal_vaddr = efi_get_pal_addr ();
    1.20  	u64 psr;
    1.21 +#endif
    1.22  
    1.23  	if (!pal_vaddr)
    1.24  		return;
     2.1 --- a/xen/arch/ia64/linux-xen/unaligned.c	Thu Sep 08 07:24:08 2005 -0600
     2.2 +++ b/xen/arch/ia64/linux-xen/unaligned.c	Thu Sep 08 09:18:40 2005 -0600
     2.3 @@ -296,7 +296,7 @@ rotate_reg (unsigned long sor, unsigned 
     2.4  }
     2.5  
     2.6  #if defined(XEN) && defined(CONFIG_VTI)
     2.7 -static void
     2.8 +void
     2.9  set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
    2.10  {
    2.11  	struct switch_stack *sw = (struct switch_stack *) regs - 1;
    2.12 @@ -359,6 +359,57 @@ set_rse_reg (struct pt_regs *regs, unsig
    2.13      }
    2.14      ia64_set_rsc(old_rsc);
    2.15  }
    2.16 +
    2.17 +
    2.18 +static void
    2.19 +get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigned long *nat)
    2.20 +{
    2.21 +    struct switch_stack *sw = (struct switch_stack *) regs - 1;
    2.22 +    unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
    2.23 +    unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
    2.24 +    unsigned long rnats, nat_mask;
    2.25 +    unsigned long on_kbs;
    2.26 +    unsigned long old_rsc, new_rsc;
    2.27 +    long sof = (regs->cr_ifs) & 0x7f;
    2.28 +    long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
    2.29 +    long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
    2.30 +    long ridx = r1 - 32;
    2.31 +
    2.32 +    if (ridx >= sof) {
    2.33 +        /* read of out-of-frame register returns an undefined value; 0 in our case.  */
    2.34 +        DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
    2.35 +        panic("wrong stack register number");
    2.36 +    }
    2.37 +
    2.38 +    if (ridx < sor)
    2.39 +        ridx = rotate_reg(sor, rrb_gr, ridx);
    2.40 +
    2.41 +    old_rsc=ia64_get_rsc();
    2.42 +    new_rsc=old_rsc&(~(0x3));
    2.43 +    ia64_set_rsc(new_rsc);
    2.44 +
    2.45 +    bspstore = ia64_get_bspstore();
    2.46 +    bsp =kbs + (regs->loadrs >> 19); //16+3;
    2.47 +
    2.48 +    addr = ia64_rse_skip_regs(bsp, -sof + ridx);
    2.49 +    nat_mask = 1UL << ia64_rse_slot_num(addr);
    2.50 +    rnat_addr = ia64_rse_rnat_addr(addr);
    2.51 +
    2.52 +    if(addr >= bspstore){
    2.53 +
    2.54 +        ia64_flushrs ();
    2.55 +        ia64_mf ();
    2.56 +        bspstore = ia64_get_bspstore();
    2.57 +    }
    2.58 +    *val=*addr;
    2.59 +    if(bspstore < rnat_addr){
    2.60 +        *nat=!!(ia64_get_rnat()&nat_mask);
    2.61 +    }else{
    2.62 +        *nat = !!((*rnat_addr)&nat_mask);
    2.63 +    }
    2.64 +    ia64_set_rsc(old_rsc);
    2.65 +}
    2.66 +
    2.67  #else // CONFIG_VTI
    2.68  static void
    2.69  set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
     3.1 --- a/xen/arch/ia64/vmx/mm.c	Thu Sep 08 07:24:08 2005 -0600
     3.2 +++ b/xen/arch/ia64/vmx/mm.c	Thu Sep 08 09:18:40 2005 -0600
     3.3 @@ -125,7 +125,7 @@ int do_mmu_update(mmu_update_t *ureqs,u6
     3.4              entry.cl = DSIDE_TLB;
     3.5              rr = vmx_vcpu_rr(vcpu, req.ptr);
     3.6              entry.ps = rr.ps;
     3.7 -            entry.key = redistribute_rid(rr.rid);
     3.8 +            entry.key = rr.rid;
     3.9              entry.rid = rr.rid;
    3.10              entry.vadr = PAGEALIGN(req.ptr,entry.ps);
    3.11              sections.tr = 1;
     4.1 --- a/xen/arch/ia64/vmx/vlsapic.c	Thu Sep 08 07:24:08 2005 -0600
     4.2 +++ b/xen/arch/ia64/vmx/vlsapic.c	Thu Sep 08 09:18:40 2005 -0600
     4.3 @@ -181,7 +181,7 @@ void vtm_set_itv(VCPU *vcpu)
     4.4   */
     4.5  /* Interrupt must be disabled at this point */
     4.6  
     4.7 -extern u64 tick_to_ns(u64 tick);
     4.8 +extern u64 cycle_to_ns(u64 cyle);
     4.9  #define TIMER_SLOP (50*1000) /* ns */  /* copy from ac_timer.c */
    4.10  void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm)
    4.11  {
    4.12 @@ -212,7 +212,7 @@ void vtm_interruption_update(VCPU *vcpu,
    4.13      }
    4.14      /* Both last_itc & cur_itc < itm, wait for fire condition */
    4.15      else {
    4.16 -        expires = NOW() + tick_to_ns(0-diff_now) + TIMER_SLOP;
    4.17 +        expires = NOW() + cycle_to_ns(0-diff_now) + TIMER_SLOP;
    4.18          set_ac_timer(&vtm->vtm_timer, expires);
    4.19      }
    4.20      local_irq_restore(spsr);
     5.1 --- a/xen/arch/ia64/vmx/vmmu.c	Thu Sep 08 07:24:08 2005 -0600
     5.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Thu Sep 08 09:18:40 2005 -0600
     5.3 @@ -91,6 +91,10 @@ ia64_rr vmmu_get_rr(VCPU *vcpu, u64 va)
     5.4  
     5.5  void recycle_message(thash_cb_t *hcb, u64 para)
     5.6  {
     5.7 +    if(hcb->ht == THASH_VHPT)
     5.8 +    {
     5.9 +        printk("ERROR : vhpt recycle happenning!!!\n");
    5.10 +    }
    5.11      printk("hcb=%p recycled with %lx\n",hcb,para);
    5.12  }
    5.13  
    5.14 @@ -237,8 +241,12 @@ alloc_pmt(struct domain *d)
    5.15   */
    5.16  void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb)
    5.17  {
    5.18 -    u64     saved_itir, saved_ifa, saved_rr;
    5.19 +#if 0
    5.20 +    u64     saved_itir, saved_ifa;
    5.21 +#endif
    5.22 +    u64      saved_rr;
    5.23      u64     pages;
    5.24 +    u64     psr;
    5.25      thash_data_t    mtlb;
    5.26      ia64_rr vrr;
    5.27      unsigned int    cl = tlb->cl;
    5.28 @@ -253,12 +261,12 @@ void machine_tlb_insert(struct vcpu *d, 
    5.29      if (mtlb.ppn == INVALID_MFN)
    5.30      panic("Machine tlb insert with invalid mfn number.\n");
    5.31  
    5.32 -    __asm __volatile("rsm   psr.ic|psr.i;; srlz.i" );
    5.33 -    
    5.34 +    psr = ia64_clear_ic();
    5.35 +#if 0
    5.36      saved_itir = ia64_getreg(_IA64_REG_CR_ITIR);
    5.37      saved_ifa = ia64_getreg(_IA64_REG_CR_IFA);
    5.38 +#endif
    5.39      saved_rr = ia64_get_rr(mtlb.ifa);
    5.40 -
    5.41      ia64_setreg(_IA64_REG_CR_ITIR, mtlb.itir);
    5.42      ia64_setreg(_IA64_REG_CR_IFA, mtlb.ifa);
    5.43      /* Only access memory stack which is mapped by TR,
    5.44 @@ -268,19 +276,23 @@ void machine_tlb_insert(struct vcpu *d, 
    5.45      ia64_srlz_d();
    5.46      if ( cl == ISIDE_TLB ) {
    5.47          ia64_itci(mtlb.page_flags);
    5.48 -    ia64_srlz_i();
    5.49 +        ia64_srlz_i();
    5.50      }
    5.51      else {
    5.52          ia64_itcd(mtlb.page_flags);
    5.53 -    ia64_srlz_d();
    5.54 +        ia64_srlz_d();
    5.55      }
    5.56      ia64_set_rr(mtlb.ifa,saved_rr);
    5.57      ia64_srlz_d();
    5.58 +#if 0
    5.59      ia64_setreg(_IA64_REG_CR_IFA, saved_ifa);
    5.60      ia64_setreg(_IA64_REG_CR_ITIR, saved_itir);
    5.61 -    __asm __volatile("ssm   psr.ic|psr.i;; srlz.i" );
    5.62 +#endif
    5.63 +    ia64_set_psr(psr);
    5.64 +    ia64_srlz_i();
    5.65  }
    5.66  
    5.67 +
    5.68  u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps)
    5.69  {
    5.70      u64     saved_pta, saved_rr0;
    5.71 @@ -289,7 +301,6 @@ u64 machine_thash(PTA pta, u64 va, u64 r
    5.72      struct vcpu *v = current;
    5.73      ia64_rr vrr;
    5.74  
    5.75 -    
    5.76      saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
    5.77      saved_rr0 = ia64_get_rr(0);
    5.78      vrr.rrval = saved_rr0;
    5.79 @@ -308,7 +319,7 @@ u64 machine_thash(PTA pta, u64 va, u64 r
    5.80  
    5.81      ia64_set_rr(0, saved_rr0);
    5.82      ia64_srlz_d();
    5.83 -    local_irq_restore(psr);
    5.84 +    ia64_set_psr(psr);
    5.85      return hash_addr;
    5.86  }
    5.87  
    5.88 @@ -320,7 +331,7 @@ u64 machine_ttag(PTA pta, u64 va, u64 ri
    5.89      struct vcpu *v = current;
    5.90      ia64_rr vrr;
    5.91  
    5.92 -    // TODO: Set to enforce lazy mode    
    5.93 +    // TODO: Set to enforce lazy mode
    5.94      saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
    5.95      saved_rr0 = ia64_get_rr(0);
    5.96      vrr.rrval = saved_rr0;
    5.97 @@ -341,7 +352,6 @@ u64 machine_ttag(PTA pta, u64 va, u64 ri
    5.98      local_irq_restore(psr);
    5.99      return tag;
   5.100  }
   5.101 -
   5.102  /*
   5.103   *  Purge machine tlb.
   5.104   *  INPUT
     6.1 --- a/xen/arch/ia64/vmx/vmx_entry.S	Thu Sep 08 07:24:08 2005 -0600
     6.2 +++ b/xen/arch/ia64/vmx/vmx_entry.S	Thu Sep 08 09:18:40 2005 -0600
     6.3 @@ -33,7 +33,7 @@
     6.4  #include <asm/processor.h>
     6.5  #include <asm/thread_info.h>
     6.6  #include <asm/unistd.h>
     6.7 -
     6.8 +#include <asm/vhpt.h>
     6.9  #include "vmx_minstate.h"
    6.10  
    6.11  /*
    6.12 @@ -401,8 +401,9 @@ vmx_dorfirfi_back:
    6.13      mov b0=r16
    6.14      br.cond.sptk b0         // call the service
    6.15      ;;
    6.16 +switch_rr7:
    6.17 +#ifdef XEN_DBL_MAPPING
    6.18  // switch rr7 and rr5
    6.19 -switch_rr7:
    6.20      adds r24=SWITCH_MRR5_OFFSET, r21
    6.21      adds r26=SWITCH_MRR6_OFFSET, r21
    6.22      adds r16=SWITCH_MRR7_OFFSET ,r21
    6.23 @@ -428,6 +429,7 @@ switch_rr7:
    6.24      ;;
    6.25      srlz.i
    6.26      ;;
    6.27 +#endif
    6.28  // fall through
    6.29  GLOBAL_ENTRY(ia64_vmm_entry)
    6.30  /*
    6.31 @@ -470,6 +472,7 @@ GLOBAL_ENTRY(vmx_dorfirfi)
    6.32  	;;
    6.33  END(vmx_dorfirfi)
    6.34  
    6.35 +#ifdef XEN_DBL_MAPPING  /* will be removed */
    6.36  
    6.37  #define VMX_PURGE_RR7	0
    6.38  #define VMX_INSERT_RR7	1
    6.39 @@ -609,3 +612,180 @@ GLOBAL_ENTRY(vmx_switch_rr7)
    6.40      br.sptk rp
    6.41  END(vmx_switch_rr7)
    6.42      .align PAGE_SIZE
    6.43 +
    6.44 +#else
    6.45 +/*
    6.46 + * in0: new rr7
    6.47 + * in1: virtual address of shared_info
    6.48 + * in2: virtual address of shared_arch_info (VPD)
    6.49 + * in3: virtual address of guest_vhpt
    6.50 + * in4: virtual address of pal code segment
    6.51 + * r8: will contain old rid value
    6.52 + */
    6.53 +
    6.54 +
    6.55 +#define PSR_BITS_TO_CLEAR                      \
    6.56 +   (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB |IA64_PSR_RT |     \
    6.57 +    IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |    \
    6.58 +    IA64_PSR_DFL | IA64_PSR_DFH)
    6.59 +#define PSR_BITS_TO_SET    IA64_PSR_BN
    6.60 +
    6.61 +//extern void vmx_switch_rr7(unsigned long rid,void *shared_info, void *shared_arch_info, void *guest_vhpt, void * pal_vaddr );
    6.62 +
    6.63 +GLOBAL_ENTRY(vmx_switch_rr7)
    6.64 +   // not sure this unwind statement is correct...
    6.65 +   .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
    6.66 +   alloc loc1 = ar.pfs, 5, 9, 0, 0
    6.67 +1: {
    6.68 +     mov r28  = in0        // copy procedure index
    6.69 +     mov r8   = ip         // save ip to compute branch
    6.70 +     mov loc0 = rp         // save rp
    6.71 +    };;
    6.72 +    .body
    6.73 +    movl loc2=PERCPU_ADDR
    6.74 +    ;;
    6.75 +    tpa loc2 = loc2         // get physical address of per cpu date
    6.76 +    ;;
    6.77 +    dep loc3 = 0,in1,60,4          // get physical address of shared_info
    6.78 +    dep loc4 = 0,in2,60,4          // get physical address of shared_arch_info
    6.79 +    dep loc5 = 0,in3,60,4          // get physical address of guest_vhpt
    6.80 +    dep loc6 = 0,in4,60,4          // get physical address of pal code
    6.81 +    ;;
    6.82 +    mov loc7 = psr          // save psr
    6.83 +    ;;
    6.84 +    mov loc8 = ar.rsc           // save RSE configuration
    6.85 +    ;;
    6.86 +    mov ar.rsc = 0          // put RSE in enforced lazy, LE mode
    6.87 +    movl r16=PSR_BITS_TO_CLEAR
    6.88 +    movl r17=PSR_BITS_TO_SET
    6.89 +    ;;
    6.90 +    or loc7 = loc7,r17      // add in psr the bits to set
    6.91 +    ;;
    6.92 +    andcm r16=loc7,r16      // removes bits to clear from psr
    6.93 +    br.call.sptk.many rp=ia64_switch_mode_phys
    6.94 +1:
    6.95 +   // now in physical mode with psr.i/ic off so do rr7 switch
    6.96 +    dep r16=-1,r0,61,3
    6.97 +    ;;
    6.98 +    mov rr[r16]=in0
    6.99 +    srlz.d
   6.100 +    ;;
   6.101 +    rsm 0x6000
   6.102 +    ;;
   6.103 +    srlz.d
   6.104 +
   6.105 +    // re-pin mappings for kernel text and data
   6.106 +    mov r18=KERNEL_TR_PAGE_SHIFT<<2
   6.107 +    movl r17=KERNEL_START
   6.108 +    ;;
   6.109 +    ptr.i   r17,r18
   6.110 +    ptr.d   r17,r18
   6.111 +    ;;
   6.112 +    mov cr.itir=r18
   6.113 +    mov cr.ifa=r17
   6.114 +    mov r16=IA64_TR_KERNEL
   6.115 +    //mov r3=ip
   6.116 +    movl r25 = PAGE_KERNEL
   6.117 +    ;;
   6.118 +    dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
   6.119 +    ;;
   6.120 +    or r18=r2,r25
   6.121 +    ;;
   6.122 +   srlz.i
   6.123 +   ;;
   6.124 +   itr.i itr[r16]=r18
   6.125 +   ;;
   6.126 +   itr.d dtr[r16]=r18
   6.127 +   ;;
   6.128 +
   6.129 +   // re-pin mappings for per-cpu data
   6.130 +
   6.131 +   movl r22 = PERCPU_ADDR
   6.132 +   ;;
   6.133 +   mov r24=IA64_TR_PERCPU_DATA
   6.134 +   or loc2 = r25,loc2          // construct PA | page properties
   6.135 +   mov r23=PERCPU_PAGE_SHIFT<<2
   6.136 +   ;;
   6.137 +   ptr.d   r22,r23
   6.138 +   ;;
   6.139 +   mov cr.itir=r23
   6.140 +   mov cr.ifa=r22
   6.141 +   ;;
   6.142 +   itr.d dtr[r24]=loc2     // wire in new mapping...
   6.143 +   ;;
   6.144 +
   6.145 +
   6.146 +#if    0
   6.147 +   // re-pin mappings for shared_info
   6.148 +
   6.149 +   mov r24=IA64_TR_SHARED_INFO
   6.150 +   movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
   6.151 +   ;;
   6.152 +   or loc3 = r25,loc3          // construct PA | page properties
   6.153 +   mov r23 = PAGE_SHIFT<<2
   6.154 +   ;;
   6.155 +   ptr.d   in1,r23
   6.156 +   ;;
   6.157 +   mov cr.itir=r23
   6.158 +   mov cr.ifa=in1
   6.159 +   ;;
   6.160 +   itr.d dtr[r24]=loc3     // wire in new mapping...
   6.161 +   ;;
   6.162 +   // re-pin mappings for shared_arch_info
   6.163 +
   6.164 +   mov r24=IA64_TR_ARCH_INFO
   6.165 +   or loc4 = r25,loc4          // construct PA | page properties
   6.166 +   mov r23 = PAGE_SHIFT<<2
   6.167 +   ;;
   6.168 +   ptr.d   in2,r23
   6.169 +   ;;
   6.170 +   mov cr.itir=r23
   6.171 +   mov cr.ifa=in2
   6.172 +   ;;
   6.173 +   itr.d dtr[r24]=loc4     // wire in new mapping...
   6.174 +   ;;
   6.175 +#endif
   6.176 +
   6.177 +
   6.178 +   // re-pin mappings for guest_vhpt
   6.179 +
   6.180 +   mov r24=IA64_TR_VHPT
   6.181 +   movl r25=PAGE_KERNEL
   6.182 +   ;;
   6.183 +   or loc5 = r25,loc5          // construct PA | page properties
   6.184 +   mov r23 = VCPU_TLB_SHIFT<<2
   6.185 +   ;;
   6.186 +   ptr.d   in3,r23
   6.187 +   ;;
   6.188 +   mov cr.itir=r23
   6.189 +   mov cr.ifa=in3
   6.190 +   ;;
   6.191 +   itr.d dtr[r24]=loc5     // wire in new mapping...
   6.192 +   ;;
   6.193 +
   6.194 +   // re-pin mappings for PAL code section
   6.195 +
   6.196 +   mov r24=IA64_TR_PALCODE
   6.197 +   or loc6 = r25,loc6          // construct PA | page properties
   6.198 +   mov r23 = IA64_GRANULE_SHIFT<<2
   6.199 +   ;;
   6.200 +   ptr.i   in4,r23
   6.201 +   ;;
   6.202 +   mov cr.itir=r23
   6.203 +   mov cr.ifa=in4
   6.204 +   ;;
   6.205 +   itr.i itr[r24]=loc6     // wire in new mapping...
   6.206 +   ;;
   6.207 +
   6.208 +   // done, switch back to virtual and return
   6.209 +   mov r16=loc7            // r16= original psr
   6.210 +   br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
   6.211 +   mov ar.pfs = loc1
   6.212 +   mov rp = loc0
   6.213 +   ;;
   6.214 +   mov ar.rsc=loc8         // restore RSE configuration
   6.215 +   srlz.d              // seralize restoration of psr.l
   6.216 +   br.ret.sptk.many rp
   6.217 +END(vmx_switch_rr7)
   6.218 +#endif
   6.219 +
     7.1 --- a/xen/arch/ia64/vmx/vmx_irq_ia64.c	Thu Sep 08 07:24:08 2005 -0600
     7.2 +++ b/xen/arch/ia64/vmx/vmx_irq_ia64.c	Thu Sep 08 09:18:40 2005 -0600
     7.3 @@ -24,6 +24,12 @@
     7.4  #include <asm/pgtable.h>
     7.5  #include <asm/system.h>
     7.6  
     7.7 +#ifdef CONFIG_SMP
     7.8 +#   define IS_RESCHEDULE(vec)   (vec == IA64_IPI_RESCHEDULE)
     7.9 +#else
    7.10 +#   define IS_RESCHEDULE(vec)   (0)
    7.11 +#endif
    7.12 +
    7.13  #ifdef CONFIG_PERFMON
    7.14  # include <asm/perfmon.h>
    7.15  #endif
     8.1 --- a/xen/arch/ia64/vmx/vmx_ivt.S	Thu Sep 08 07:24:08 2005 -0600
     8.2 +++ b/xen/arch/ia64/vmx/vmx_ivt.S	Thu Sep 08 09:18:40 2005 -0600
     8.3 @@ -118,10 +118,12 @@ ENTRY(vmx_itlb_miss)
     8.4      mov r29=cr.ipsr;
     8.5      ;;
     8.6      tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
     8.7 -(p6) br.sptk vmx_fault_1
     8.8 +(p6) br.sptk vmx_alt_itlb_miss_1
     8.9 +//(p6) br.sptk vmx_fault_1
    8.10      mov r16 = cr.ifa
    8.11      ;;
    8.12      thash r17 = r16
    8.13 +    ;;
    8.14      ttag r20 = r16
    8.15      ;;
    8.16  vmx_itlb_loop:
    8.17 @@ -180,10 +182,12 @@ ENTRY(vmx_dtlb_miss)
    8.18      mov r29=cr.ipsr;
    8.19      ;;
    8.20      tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
    8.21 -(p6)br.sptk vmx_fault_2
    8.22 +    (p6)br.sptk vmx_alt_dtlb_miss_1
    8.23 +//(p6)br.sptk vmx_fault_2
    8.24      mov r16 = cr.ifa
    8.25      ;;
    8.26      thash r17 = r16
    8.27 +    ;;
    8.28      ttag r20 = r16
    8.29      ;;
    8.30  vmx_dtlb_loop:
    8.31 @@ -243,6 +247,7 @@ ENTRY(vmx_alt_itlb_miss)
    8.32      ;;
    8.33      tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
    8.34  (p7)br.sptk vmx_fault_3
    8.35 +vmx_alt_itlb_miss_1:
    8.36  	mov r16=cr.ifa		// get address that caused the TLB miss
    8.37  	movl r17=PAGE_KERNEL
    8.38  	mov r24=cr.ipsr
    8.39 @@ -272,6 +277,7 @@ ENTRY(vmx_alt_dtlb_miss)
    8.40      ;;
    8.41      tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
    8.42  (p7)br.sptk vmx_fault_4
    8.43 +vmx_alt_dtlb_miss_1:
    8.44  	mov r16=cr.ifa		// get address that caused the TLB miss
    8.45  	movl r17=PAGE_KERNEL
    8.46  	mov r20=cr.isr
     9.1 --- a/xen/arch/ia64/vmx/vmx_minstate.h	Thu Sep 08 07:24:08 2005 -0600
     9.2 +++ b/xen/arch/ia64/vmx/vmx_minstate.h	Thu Sep 08 09:18:40 2005 -0600
     9.3 @@ -128,21 +128,31 @@
     9.4   * Note that psr.ic is NOT turned on by this macro.  This is so that
     9.5   * we can pass interruption state as arguments to a handler.
     9.6   */
     9.7 -#define VMX_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)                           \
     9.8 +#ifdef XEN_DBL_MAPPING
     9.9 +#define SAVE_MIN_CHANGE_RR  \
    9.10  /*  switch rr7 */       \
    9.11      movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)); \
    9.12      movl r17=(7<<61);        \
    9.13      movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)); \
    9.14      movl r22=(6<<61);        \
    9.15 -    movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1);		\
    9.16 -    movl r23=(5<<61);	\
    9.17 +    movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1);     \
    9.18 +    movl r23=(5<<61);   \
    9.19      ;;              \
    9.20      mov rr[r17]=r16;             \
    9.21 -    mov rr[r22]=r20;		 \
    9.22 -    mov rr[r23]=r18;		 \
    9.23 +    mov rr[r22]=r20;         \
    9.24 +    mov rr[r23]=r18;         \
    9.25      ;;      \
    9.26      srlz.i;      \
    9.27 -    ;;  \
    9.28 +    ;;
    9.29 +
    9.30 +#else
    9.31 +
    9.32 +#define SAVE_MIN_CHANGE_RR
    9.33 +
    9.34 +#endif
    9.35 +
    9.36 +#define VMX_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)                           \
    9.37 +    SAVE_MIN_CHANGE_RR;      \
    9.38      VMX_MINSTATE_GET_CURRENT(r16);  /* M (or M;;I) */                   \
    9.39      mov r27=ar.rsc;         /* M */                         \
    9.40      mov r20=r1;         /* A */                         \
    10.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Thu Sep 08 07:24:08 2005 -0600
    10.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Thu Sep 08 09:18:40 2005 -0600
    10.3 @@ -28,7 +28,6 @@
    10.4  #include <xen/sched.h>
    10.5  #include <asm/pgtable.h>
    10.6  
    10.7 -
    10.8  int valid_mm_mode[8] = {
    10.9      GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
   10.10      INV_MODE,
   10.11 @@ -215,13 +214,13 @@ void
   10.12  vmx_init_all_rr(VCPU *vcpu)
   10.13  {
   10.14  	VMX(vcpu,vrr[VRN0]) = 0x38;
   10.15 -	VMX(vcpu,vrr[VRN1]) = 0x38;
   10.16 -	VMX(vcpu,vrr[VRN2]) = 0x38;
   10.17 -	VMX(vcpu,vrr[VRN3]) = 0x38;
   10.18 -	VMX(vcpu,vrr[VRN4]) = 0x38;
   10.19 -	VMX(vcpu,vrr[VRN5]) = 0x38;
   10.20 -	VMX(vcpu,vrr[VRN6]) = 0x60;
   10.21 -	VMX(vcpu,vrr[VRN7]) = 0x60;
   10.22 +	VMX(vcpu,vrr[VRN1]) = 0x138;
   10.23 +	VMX(vcpu,vrr[VRN2]) = 0x238;
   10.24 +	VMX(vcpu,vrr[VRN3]) = 0x338;
   10.25 +	VMX(vcpu,vrr[VRN4]) = 0x438;
   10.26 +	VMX(vcpu,vrr[VRN5]) = 0x538;
   10.27 +	VMX(vcpu,vrr[VRN6]) = 0x660;
   10.28 +	VMX(vcpu,vrr[VRN7]) = 0x760;
   10.29  
   10.30  	VMX(vcpu,mrr5) = vmx_vrrtomrr(vcpu, 0x38);
   10.31  	VMX(vcpu,mrr6) = vmx_vrrtomrr(vcpu, 0x60);
   10.32 @@ -234,10 +233,8 @@ vmx_load_all_rr(VCPU *vcpu)
   10.33  	unsigned long psr;
   10.34  	ia64_rr phy_rr;
   10.35  
   10.36 -	psr = ia64_clear_ic();
   10.37 +	local_irq_save(psr);
   10.38  
   10.39 -	phy_rr.ps = EMUL_PHY_PAGE_SHIFT; 
   10.40 -	phy_rr.ve = 1;
   10.41  
   10.42  	/* WARNING: not allow co-exist of both virtual mode and physical
   10.43  	 * mode in same region
   10.44 @@ -245,9 +242,15 @@ vmx_load_all_rr(VCPU *vcpu)
   10.45  	if (is_physical_mode(vcpu)) {
   10.46  		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
   10.47  			panic("Unexpected domain switch in phy emul\n");
   10.48 -		phy_rr.rid = vcpu->domain->arch.metaphysical_rr0;
   10.49 +		phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
   10.50 +    	phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
   10.51 +    	phy_rr.ve = 1;
   10.52 +
   10.53  		ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval);
   10.54 -		phy_rr.rid = vcpu->domain->arch.metaphysical_rr4;
   10.55 +		phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
   10.56 +    	phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
   10.57 +	    phy_rr.ve = 1;
   10.58 +
   10.59  		ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval);
   10.60  	} else {
   10.61  		ia64_set_rr((VRN0 << VRN_SHIFT),
   10.62 @@ -265,6 +268,18 @@ vmx_load_all_rr(VCPU *vcpu)
   10.63  	ia64_set_rr((VRN3 << VRN_SHIFT),
   10.64  		     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
   10.65  #endif
   10.66 +#ifndef XEN_DBL_MAPPING
   10.67 +    extern void * pal_vaddr;
   10.68 +    ia64_set_rr((VRN5 << VRN_SHIFT),
   10.69 +            vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
   10.70 +    ia64_set_rr((VRN6 << VRN_SHIFT),
   10.71 +            vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
   10.72 +    vmx_switch_rr7(vmx_vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),(void *)vcpu->domain->shared_info,
   10.73 +                (void *)vcpu->vcpu_info->arch.privregs,
   10.74 +                ( void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
   10.75 +    ia64_set_pta(vcpu->arch.arch_vmx.mpta);
   10.76 +#endif
   10.77 +
   10.78  	ia64_srlz_d();
   10.79  	ia64_set_psr(psr);
   10.80      ia64_srlz_i();
   10.81 @@ -276,15 +291,17 @@ switch_to_physical_rid(VCPU *vcpu)
   10.82      UINT64 psr;
   10.83      ia64_rr phy_rr;
   10.84  
   10.85 -    phy_rr.ps = EMUL_PHY_PAGE_SHIFT; 
   10.86 -    phy_rr.ve = 1;
   10.87  
   10.88      /* Save original virtual mode rr[0] and rr[4] */
   10.89      psr=ia64_clear_ic();
   10.90 -    phy_rr.rid = vcpu->domain->arch.metaphysical_rr0;
   10.91 +    phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
   10.92 +    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
   10.93 +    phy_rr.ve = 1;
   10.94      ia64_set_rr(VRN0<<VRN_SHIFT, phy_rr.rrval);
   10.95      ia64_srlz_d();
   10.96 -    phy_rr.rid = vcpu->domain->arch.metaphysical_rr4;
   10.97 +    phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
   10.98 +    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
   10.99 +    phy_rr.ve = 1;
  10.100      ia64_set_rr(VRN4<<VRN_SHIFT, phy_rr.rrval);
  10.101      ia64_srlz_d();
  10.102  
    11.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Thu Sep 08 07:24:08 2005 -0600
    11.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Thu Sep 08 09:18:40 2005 -0600
    11.3 @@ -41,7 +41,7 @@
    11.4  #include <asm/regionreg.h>
    11.5  #include <asm/privop.h>
    11.6  #include <asm/ia64_int.h>
    11.7 -#include <asm/hpsim_ssc.h>
    11.8 +//#include <asm/hpsim_ssc.h>
    11.9  #include <asm/dom_fw.h>
   11.10  #include <asm/vmx_vcpu.h>
   11.11  #include <asm/kregs.h>
    12.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Thu Sep 08 07:24:08 2005 -0600
    12.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Thu Sep 08 09:18:40 2005 -0600
    12.3 @@ -215,6 +215,7 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UI
    12.4  {
    12.5      ia64_rr oldrr,newrr;
    12.6      thash_cb_t *hcb;
    12.7 +    extern void * pal_vaddr;
    12.8      oldrr=vmx_vcpu_rr(vcpu,reg);
    12.9      newrr.rrval=val;
   12.10  #if 1
   12.11 @@ -224,7 +225,9 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UI
   12.12      }
   12.13  #endif
   12.14      VMX(vcpu,vrr[reg>>61]) = val;
   12.15 +
   12.16      switch((u64)(reg>>61)) {
   12.17 +#ifdef XEN_DBL_MAPPING
   12.18      case VRN5:
   12.19          VMX(vcpu,mrr5)=vmx_vrrtomrr(vcpu,val);
   12.20          break;
   12.21 @@ -234,12 +237,17 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UI
   12.22      case VRN7:
   12.23          VMX(vcpu,mrr7)=vmx_vrrtomrr(vcpu,val);
   12.24          /* Change double mapping for this domain */
   12.25 -#ifdef XEN_DBL_MAPPING
   12.26          vmx_change_double_mapping(vcpu,
   12.27                        vmx_vrrtomrr(vcpu,oldrr.rrval),
   12.28                        vmx_vrrtomrr(vcpu,newrr.rrval));
   12.29 +        break;
   12.30 +#else
   12.31 +    case VRN7:
   12.32 +       vmx_switch_rr7(vmx_vrrtomrr(vcpu,val),vcpu->domain->shared_info,
   12.33 +        (void *)vcpu->vcpu_info->arch.privregs,
   12.34 +       ( void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
   12.35 +       break;
   12.36  #endif
   12.37 -        break;
   12.38      default:
   12.39          ia64_set_rr(reg,vmx_vrrtomrr(vcpu,val));
   12.40          break;
    13.1 --- a/xen/arch/ia64/vmx/vtlb.c	Thu Sep 08 07:24:08 2005 -0600
    13.2 +++ b/xen/arch/ia64/vmx/vtlb.c	Thu Sep 08 09:18:40 2005 -0600
    13.3 @@ -343,7 +343,7 @@ thash_data_t *__alloc_chain(thash_cb_t *
    13.4                  hcb->recycle_notifier(hcb,(u64)entry);
    13.5          }
    13.6          thash_purge_all(hcb);
    13.7 -        cch = cch_alloc(hcb);
    13.8 +//        cch = cch_alloc(hcb);
    13.9      }
   13.10      return cch;
   13.11  }
   13.12 @@ -364,7 +364,7 @@ void vtlb_insert(thash_cb_t *hcb, thash_
   13.13      ia64_rr vrr;
   13.14      u64 gppn;
   13.15      u64 ppns, ppne;
   13.16 -    
   13.17 +
   13.18      hash_table = (hcb->hash_func)(hcb->pta,
   13.19                          va, entry->rid, entry->ps);
   13.20      if( INVALID_ENTRY(hcb, hash_table) ) {
   13.21 @@ -374,10 +374,14 @@ void vtlb_insert(thash_cb_t *hcb, thash_
   13.22      else {
   13.23          // TODO: Add collision chain length limitation.
   13.24          cch = __alloc_chain(hcb,entry);
   13.25 -        
   13.26 -        *cch = *hash_table;
   13.27 -        *hash_table = *entry;
   13.28 -        hash_table->next = cch;
   13.29 +        if(cch == NULL){
   13.30 +            *hash_table = *entry;
   13.31 +            hash_table->next = 0;
   13.32 +        }else{
   13.33 +            *cch = *hash_table;
   13.34 +            *hash_table = *entry;
   13.35 +            hash_table->next = cch;
   13.36 +        }
   13.37      }
   13.38      if(hcb->vcpu->domain->domain_id==0){
   13.39         thash_insert(hcb->ts->vhpt, entry, va);
   13.40 @@ -396,26 +400,29 @@ void vtlb_insert(thash_cb_t *hcb, thash_
   13.41  
   13.42  static void vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
   13.43  {
   13.44 -    thash_data_t    *hash_table, *cch;
   13.45 +    thash_data_t   vhpt_entry, *hash_table, *cch;
   13.46      ia64_rr vrr;
   13.47 -    
   13.48 +    if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) {
   13.49 +        panic("Can't convert to machine VHPT entry\n");
   13.50 +    }
   13.51      hash_table = (hcb->hash_func)(hcb->pta,
   13.52                          va, entry->rid, entry->ps);
   13.53      if( INVALID_ENTRY(hcb, hash_table) ) {
   13.54 -        if ( !__tlb_to_vhpt(hcb, entry, va, hash_table) ) {
   13.55 -            panic("Can't convert to machine VHPT entry\n");
   13.56 -        }
   13.57 +        *hash_table = vhpt_entry;
   13.58          hash_table->next = 0;
   13.59      }
   13.60      else {
   13.61          // TODO: Add collision chain length limitation.
   13.62          cch = __alloc_chain(hcb,entry);
   13.63 -        
   13.64 -        *cch = *hash_table;
   13.65 -        if ( !__tlb_to_vhpt(hcb, entry, va, hash_table) ) {
   13.66 -            panic("Can't convert to machine VHPT entry\n");
   13.67 +        if(cch == NULL){
   13.68 +            *hash_table = vhpt_entry;
   13.69 +            hash_table->next = 0;
   13.70 +        }else{
   13.71 +            *cch = *hash_table;
   13.72 +            *hash_table = vhpt_entry;
   13.73 +            hash_table->next = cch;
   13.74          }
   13.75 -        hash_table->next = cch;
   13.76 +
   13.77          if(hash_table->tag==hash_table->next->tag)
   13.78              while(1);
   13.79      }
   13.80 @@ -488,10 +495,10 @@ static thash_data_t *thash_rem_cch(thash
   13.81  {
   13.82      thash_data_t *next;
   13.83  
   13.84 -    if ( ++cch_depth > MAX_CCH_LENGTH ) {
   13.85 -        printf ("cch length > MAX_CCH_LENGTH, exceed the expected length\n");
   13.86 -        while(1);
   13.87 -   }
   13.88 +//    if ( ++cch_depth > MAX_CCH_LENGTH ) {
   13.89 +//        printf ("cch length > MAX_CCH_LENGTH, exceed the expected length\n");
   13.90 +//        while(1);
   13.91 +//   }
   13.92      if ( cch -> next ) {
   13.93          next = thash_rem_cch(hcb, cch->next);
   13.94      }
   13.95 @@ -914,7 +921,7 @@ void thash_init(thash_cb_t *hcb, u64 sz)
   13.96          INVALIDATE_HASH(hcb,hash_table);
   13.97      }
   13.98  }
   13.99 -
  13.100 +#define VTLB_DEBUG
  13.101  #ifdef  VTLB_DEBUG
  13.102  static  u64 cch_length_statistics[MAX_CCH_LENGTH+1];
  13.103  u64  sanity_check=0;
    14.1 --- a/xen/arch/ia64/xen/hyperprivop.S	Thu Sep 08 07:24:08 2005 -0600
    14.2 +++ b/xen/arch/ia64/xen/hyperprivop.S	Thu Sep 08 09:18:40 2005 -0600
    14.3 @@ -27,6 +27,24 @@
    14.4  #undef RFI_TO_INTERRUPT // not working yet
    14.5  #endif
    14.6  
    14.7 +#define    XEN_HYPER_RFI           0x1
    14.8 +#define    XEN_HYPER_RSM_DT        0x2
    14.9 +#define    XEN_HYPER_SSM_DT        0x3
   14.10 +#define    XEN_HYPER_COVER         0x4
   14.11 +#define    XEN_HYPER_ITC_D         0x5
   14.12 +#define    XEN_HYPER_ITC_I         0x6
   14.13 +#define    XEN_HYPER_SSM_I         0x7
   14.14 +#define    XEN_HYPER_GET_IVR       0x8
   14.15 +#define    XEN_HYPER_GET_TPR       0x9
   14.16 +#define    XEN_HYPER_SET_TPR       0xa
   14.17 +#define    XEN_HYPER_EOI           0xb
   14.18 +#define    XEN_HYPER_SET_ITM       0xc
   14.19 +#define    XEN_HYPER_THASH         0xd
   14.20 +#define    XEN_HYPER_PTC_GA        0xe
   14.21 +#define    XEN_HYPER_ITR_D         0xf
   14.22 +#define    XEN_HYPER_GET_RR        0x10
   14.23 +#define    XEN_HYPER_SET_RR        0x11
   14.24 +
   14.25  #ifdef CONFIG_SMP
   14.26  #warning "FIXME: ptc.ga instruction requires spinlock for SMP"
   14.27  #undef FAST_PTC_GA
    15.1 --- a/xen/arch/ia64/xen/regionreg.c	Thu Sep 08 07:24:08 2005 -0600
    15.2 +++ b/xen/arch/ia64/xen/regionreg.c	Thu Sep 08 09:18:40 2005 -0600
    15.3 @@ -51,7 +51,7 @@ ia64_set_rr (unsigned long rr, unsigned 
    15.4  // use this to allocate a rid out of the "Xen reserved rid block"
    15.5  unsigned long allocate_reserved_rid(void)
    15.6  {
    15.7 -	static unsigned long currentrid = XEN_DEFAULT_RID;
    15.8 +	static unsigned long currentrid = XEN_DEFAULT_RID+1;
    15.9  	unsigned long t = currentrid;
   15.10  
   15.11  	unsigned long max = RIDS_PER_RIDBLOCK;
    16.1 --- a/xen/arch/ia64/xen/vcpu.c	Thu Sep 08 07:24:08 2005 -0600
    16.2 +++ b/xen/arch/ia64/xen/vcpu.c	Thu Sep 08 09:18:40 2005 -0600
    16.3 @@ -1037,7 +1037,7 @@ void vcpu_set_next_timer(VCPU *vcpu)
    16.4  #endif
    16.5  
    16.6  	if (is_idle_task(vcpu->domain)) {
    16.7 -		printf("****** vcpu_set_next_timer called during idle!!\n");
    16.8 +//		printf("****** vcpu_set_next_timer called during idle!!\n");
    16.9  		vcpu_safe_set_itm(s);
   16.10  		return;
   16.11  	}
    17.1 --- a/xen/include/asm-ia64/mm.h	Thu Sep 08 07:24:08 2005 -0600
    17.2 +++ b/xen/include/asm-ia64/mm.h	Thu Sep 08 09:18:40 2005 -0600
    17.3 @@ -163,8 +163,8 @@ static inline int get_page(struct pfn_in
    17.4  	    unlikely((nx & PGC_count_mask) == 0) ||	/* Count overflow? */
    17.5  	    unlikely((x >> 32) != _domain)) {		/* Wrong owner? */
    17.6  	    DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%08x\n",
    17.7 -		page_to_pfn(page), domain, unpickle_domptr(d),
    17.8 -		x, page->u.inuse.typeinfo);
    17.9 +		page_to_pfn(page), domain, unpickle_domptr(domain),
   17.10 +		x, page->u.inuse.type_info);
   17.11  	    return 0;
   17.12  	}
   17.13      }
    18.1 --- a/xen/include/asm-ia64/regionreg.h	Thu Sep 08 07:24:08 2005 -0600
    18.2 +++ b/xen/include/asm-ia64/regionreg.h	Thu Sep 08 09:18:40 2005 -0600
    18.3 @@ -55,8 +55,8 @@ vmMangleRID(unsigned long RIDVal)
    18.4  
    18.5  	t.uint = RIDVal;
    18.6  	tmp = t.bytes[1];
    18.7 -	t.bytes[1] = t.bytes[3];
    18.8 -	t.bytes[3] = tmp;
    18.9 +	t.bytes[1] = t.bytes[2];
   18.10 +	t.bytes[2] = tmp;
   18.11  
   18.12  	return t.uint;
   18.13  }
    19.1 --- a/xen/include/asm-ia64/vmmu.h	Thu Sep 08 07:24:08 2005 -0600
    19.2 +++ b/xen/include/asm-ia64/vmmu.h	Thu Sep 08 09:18:40 2005 -0600
    19.3 @@ -225,8 +225,8 @@ typedef struct thash_cb {
    19.4             INVALID_ENTRY(hcb, hash) = 1;        \
    19.5             hash->next = NULL; }
    19.6  
    19.7 -#define PURGABLE_ENTRY(hcb,en)  \
    19.8 -		((hcb)->ht == THASH_VHPT || ( (en)->tc && !(en->locked)) )
    19.9 +#define PURGABLE_ENTRY(hcb,en)  1
   19.10 +//		((hcb)->ht == THASH_VHPT || ( (en)->tc && !(en->locked)) )
   19.11  
   19.12  
   19.13  /*
    20.1 --- a/xen/include/asm-ia64/vmx.h	Thu Sep 08 07:24:08 2005 -0600
    20.2 +++ b/xen/include/asm-ia64/vmx.h	Thu Sep 08 09:18:40 2005 -0600
    20.3 @@ -29,7 +29,6 @@ extern void identify_vmx_feature(void);
    20.4  extern unsigned int vmx_enabled;
    20.5  extern void vmx_init_env(void);
    20.6  extern void vmx_final_setup_domain(struct domain *d);
    20.7 -extern void vmx_init_double_mapping_stub(void);
    20.8  extern void vmx_save_state(struct vcpu *v);
    20.9  extern void vmx_load_state(struct vcpu *v);
   20.10  extern void vmx_setup_platform(struct vcpu *v, struct vcpu_guest_context *c);
   20.11 @@ -37,6 +36,7 @@ extern void vmx_setup_platform(struct vc
   20.12  extern vmx_insert_double_mapping(u64,u64,u64,u64,u64);
   20.13  extern void vmx_purge_double_mapping(u64, u64, u64);
   20.14  extern void vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7);
   20.15 +extern void vmx_init_double_mapping_stub(void);
   20.16  #endif
   20.17  
   20.18  extern void vmx_wait_io(void);
    21.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Thu Sep 08 07:24:08 2005 -0600
    21.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Thu Sep 08 09:18:40 2005 -0600
    21.3 @@ -593,9 +593,10 @@ IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
    21.4      VMX_VPD(vcpu,vpsr) |= IA64_PSR_BN;
    21.5      return (IA64_NO_FAULT);
    21.6  }
    21.7 -
    21.8 +#if 0
    21.9  /* Another hash performance algorithm */
   21.10  #define redistribute_rid(rid)	(((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
   21.11 +#endif
   21.12  static inline unsigned long
   21.13  vmx_vrrtomrr(VCPU *v, unsigned long val)
   21.14  {
   21.15 @@ -603,14 +604,14 @@ vmx_vrrtomrr(VCPU *v, unsigned long val)
   21.16      u64	  rid;
   21.17  
   21.18      rr.rrval=val;
   21.19 -    rr.rid = vmMangleRID(v->arch.starting_rid  + rr.rid);
   21.20 +    rr.rid = rr.rid + v->arch.starting_rid;
   21.21 +    rr.ve = 1;
   21.22 +    return  vmMangleRID(rr.rrval);
   21.23  /* Disable this rid allocation algorithm for now */
   21.24  #if 0
   21.25      rid=(((u64)vcpu->domain->domain_id)<<DOMAIN_RID_SHIFT) + rr.rid;
   21.26      rr.rid = redistribute_rid(rid);
   21.27  #endif 
   21.28  
   21.29 -    rr.ve=1;
   21.30 -    return rr.rrval;
   21.31  }
   21.32  #endif
    22.1 --- a/xen/include/public/arch-ia64.h	Thu Sep 08 07:24:08 2005 -0600
    22.2 +++ b/xen/include/public/arch-ia64.h	Thu Sep 08 09:18:40 2005 -0600
    22.3 @@ -280,22 +280,4 @@ typedef struct vcpu_guest_context {
    22.4  
    22.5  #endif /* !__ASSEMBLY__ */
    22.6  
    22.7 -#define	XEN_HYPER_RFI			0x1
    22.8 -#define	XEN_HYPER_RSM_DT		0x2
    22.9 -#define	XEN_HYPER_SSM_DT		0x3
   22.10 -#define	XEN_HYPER_COVER			0x4
   22.11 -#define	XEN_HYPER_ITC_D			0x5
   22.12 -#define	XEN_HYPER_ITC_I			0x6
   22.13 -#define	XEN_HYPER_SSM_I			0x7
   22.14 -#define	XEN_HYPER_GET_IVR		0x8
   22.15 -#define	XEN_HYPER_GET_TPR		0x9
   22.16 -#define	XEN_HYPER_SET_TPR		0xa
   22.17 -#define	XEN_HYPER_EOI			0xb
   22.18 -#define	XEN_HYPER_SET_ITM		0xc
   22.19 -#define	XEN_HYPER_THASH			0xd
   22.20 -#define	XEN_HYPER_PTC_GA		0xe
   22.21 -#define	XEN_HYPER_ITR_D			0xf
   22.22 -#define	XEN_HYPER_GET_RR		0x10
   22.23 -#define	XEN_HYPER_SET_RR		0x11
   22.24 -
   22.25  #endif /* __HYPERVISOR_IF_IA64_H__ */