ia64/xen-unstable

changeset 8370:2d5c57be196d

Remove some unused VTI code segments
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author djm@kirby.fc.hp.com
date Thu Dec 15 16:10:22 2005 -0600 (2005-12-15)
parents b57ed8182812
children e1ae4b162128
files xen/arch/ia64/vmx/vmx_entry.S xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/vmx/vmx_minstate.h xen/arch/ia64/vmx/vmx_phy_mode.c xen/arch/ia64/vmx/vmx_vcpu.c xen/include/asm-ia64/vmx.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmx_entry.S	Thu Dec 15 16:09:19 2005 -0600
     1.2 +++ b/xen/arch/ia64/vmx/vmx_entry.S	Thu Dec 15 16:10:22 2005 -0600
     1.3 @@ -373,34 +373,6 @@ vmx_dorfirfi_back:
     1.4      br.cond.sptk b0         // call the service
     1.5      ;;
     1.6  switch_rr7:
     1.7 -#ifdef XEN_DBL_MAPPING
     1.8 -// switch rr7 and rr5
     1.9 -    adds r24=SWITCH_MRR5_OFFSET, r21
    1.10 -    adds r26=SWITCH_MRR6_OFFSET, r21
    1.11 -    adds r16=SWITCH_MRR7_OFFSET ,r21
    1.12 -    movl r25=(5<<61)
    1.13 -    movl r27=(6<<61)
    1.14 -    movl r17=(7<<61)
    1.15 -    ;;
    1.16 -    ld8 r24=[r24]
    1.17 -    ld8 r26=[r26]
    1.18 -    ld8 r16=[r16]
    1.19 -    ;;
    1.20 -    mov rr[r25]=r24
    1.21 -    mov rr[r27]=r26
    1.22 -    mov rr[r17]=r16
    1.23 -    ;;
    1.24 -    srlz.i
    1.25 -    ;;
    1.26 -    add r24=SWITCH_MPTA_OFFSET, r21
    1.27 -    ;;
    1.28 -    ld8 r24=[r24]
    1.29 -    ;;
    1.30 -    mov cr.pta=r24
    1.31 -    ;;
    1.32 -    srlz.i
    1.33 -    ;;
    1.34 -#endif
    1.35  // fall through
    1.36  GLOBAL_ENTRY(ia64_vmm_entry)
    1.37  /*
     2.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Thu Dec 15 16:09:19 2005 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Thu Dec 15 16:10:22 2005 -0600
     2.3 @@ -133,10 +133,6 @@ vmx_init_env(void)
     2.4  	else
     2.5  		ASSERT(tmp_base != __vsa_base);
     2.6  
     2.7 -#ifdef XEN_DBL_MAPPING
     2.8 -	/* Init stub for rr7 switch */
     2.9 -	vmx_init_double_mapping_stub();
    2.10 -#endif 
    2.11  }
    2.12  
    2.13  typedef union {
    2.14 @@ -198,25 +194,6 @@ vmx_create_vp(struct vcpu *v)
    2.15  		panic("ia64_pal_vp_create failed. \n");
    2.16  }
    2.17  
    2.18 -#ifdef XEN_DBL_MAPPING
    2.19 -void vmx_init_double_mapping_stub(void)
    2.20 -{
    2.21 -	u64 base, psr;
    2.22 -	extern void vmx_switch_rr7(void);
    2.23 -
    2.24 -	base = (u64) &vmx_switch_rr7;
    2.25 -	base = *((u64*)base);
    2.26 -
    2.27 -	psr = ia64_clear_ic();
    2.28 -	ia64_itr(0x1, IA64_TR_RR7_SWITCH_STUB, XEN_RR7_SWITCH_STUB,
    2.29 -		 pte_val(pfn_pte(__pa(base) >> PAGE_SHIFT, PAGE_KERNEL)),
    2.30 -		 RR7_SWITCH_SHIFT);
    2.31 -	ia64_set_psr(psr);
    2.32 -	ia64_srlz_i();
    2.33 -	printk("Add TR mapping for rr7 switch stub, with physical: 0x%lx\n", (u64)(__pa(base)));
    2.34 -}
    2.35 -#endif
    2.36 -
    2.37  /* Other non-context related tasks can be done in context switch */
    2.38  void
    2.39  vmx_save_state(struct vcpu *v)
    2.40 @@ -229,14 +206,6 @@ vmx_save_state(struct vcpu *v)
    2.41  	if (status != PAL_STATUS_SUCCESS)
    2.42  		panic("Save vp status failed\n");
    2.43  
    2.44 -#ifdef XEN_DBL_MAPPING
    2.45 -	/* FIXME: Do we really need purge double mapping for old vcpu?
    2.46 -	 * Since rid is completely different between prev and next,
    2.47 -	 * it's not overlap and thus no MCA possible... */
    2.48 -	dom_rr7 = vmx_vrrtomrr(v, VMX(v, vrr[7]));
    2.49 -        vmx_purge_double_mapping(dom_rr7, KERNEL_START,
    2.50 -				 (u64)v->arch.vtlb->ts->vhpt->hash);
    2.51 -#endif
    2.52  
    2.53  	/* Need to save KR when domain switch, though HV itself doesn;t
    2.54  	 * use them.
    2.55 @@ -264,15 +233,6 @@ vmx_load_state(struct vcpu *v)
    2.56  	if (status != PAL_STATUS_SUCCESS)
    2.57  		panic("Restore vp status failed\n");
    2.58  
    2.59 -#ifdef XEN_DBL_MAPPING
    2.60 -	dom_rr7 = vmx_vrrtomrr(v, VMX(v, vrr[7]));
    2.61 -	pte_xen = pte_val(pfn_pte((xen_pstart >> PAGE_SHIFT), PAGE_KERNEL));
    2.62 -	pte_vhpt = pte_val(pfn_pte((__pa(v->arch.vtlb->ts->vhpt->hash) >> PAGE_SHIFT), PAGE_KERNEL));
    2.63 -	vmx_insert_double_mapping(dom_rr7, KERNEL_START,
    2.64 -				  (u64)v->arch.vtlb->ts->vhpt->hash,
    2.65 -				  pte_xen, pte_vhpt);
    2.66 -#endif
    2.67 -
    2.68  	ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
    2.69  	ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
    2.70  	ia64_set_kr(2, v->arch.arch_vmx.vkr[2]);
    2.71 @@ -285,25 +245,6 @@ vmx_load_state(struct vcpu *v)
    2.72  	 * anchored in vcpu */
    2.73  }
    2.74  
    2.75 -#ifdef XEN_DBL_MAPPING
    2.76 -/* Purge old double mapping and insert new one, due to rr7 change */
    2.77 -void
    2.78 -vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7)
    2.79 -{
    2.80 -	u64 pte_xen, pte_vhpt, vhpt_base;
    2.81 -
    2.82 -    vhpt_base = (u64)v->arch.vtlb->ts->vhpt->hash;
    2.83 -    vmx_purge_double_mapping(oldrr7, KERNEL_START,
    2.84 -				 vhpt_base);
    2.85 -
    2.86 -	pte_xen = pte_val(pfn_pte((xen_pstart >> PAGE_SHIFT), PAGE_KERNEL));
    2.87 -	pte_vhpt = pte_val(pfn_pte((__pa(vhpt_base) >> PAGE_SHIFT), PAGE_KERNEL));
    2.88 -	vmx_insert_double_mapping(newrr7, KERNEL_START,
    2.89 -				  vhpt_base,
    2.90 -				  pte_xen, pte_vhpt);
    2.91 -}
    2.92 -#endif // XEN_DBL_MAPPING
    2.93 -
    2.94  /*
    2.95   * Initialize VMX envirenment for guest. Only the 1st vp/vcpu
    2.96   * is registered here.
     3.1 --- a/xen/arch/ia64/vmx/vmx_minstate.h	Thu Dec 15 16:09:19 2005 -0600
     3.2 +++ b/xen/arch/ia64/vmx/vmx_minstate.h	Thu Dec 15 16:10:22 2005 -0600
     3.3 @@ -125,31 +125,8 @@
     3.4   * Note that psr.ic is NOT turned on by this macro.  This is so that
     3.5   * we can pass interruption state as arguments to a handler.
     3.6   */
     3.7 -#ifdef XEN_DBL_MAPPING
     3.8 -#define SAVE_MIN_CHANGE_RR  \
     3.9 -/*  switch rr7 */       \
    3.10 -    movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)); \
    3.11 -    movl r17=(7<<61);        \
    3.12 -    movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)); \
    3.13 -    movl r22=(6<<61);        \
    3.14 -    movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1);     \
    3.15 -    movl r23=(5<<61);   \
    3.16 -    ;;              \
    3.17 -    mov rr[r17]=r16;             \
    3.18 -    mov rr[r22]=r20;         \
    3.19 -    mov rr[r23]=r18;         \
    3.20 -    ;;      \
    3.21 -    srlz.i;      \
    3.22 -    ;;
    3.23 -
    3.24 -#else
    3.25 -
    3.26 -#define SAVE_MIN_CHANGE_RR
    3.27 -
    3.28 -#endif
    3.29  
    3.30  #define VMX_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)                           \
    3.31 -    SAVE_MIN_CHANGE_RR;      \
    3.32      VMX_MINSTATE_GET_CURRENT(r16);  /* M (or M;;I) */                   \
    3.33      mov r27=ar.rsc;         /* M */                         \
    3.34      mov r20=r1;         /* A */                         \
     4.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Thu Dec 15 16:09:19 2005 -0600
     4.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Thu Dec 15 16:10:22 2005 -0600
     4.3 @@ -260,7 +260,6 @@ vmx_load_all_rr(VCPU *vcpu)
     4.4  			     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4])));
     4.5  	}
     4.6  
     4.7 -#if 1
     4.8  	/* rr567 will be postponed to last point when resuming back to guest */
     4.9  	ia64_set_rr((VRN1 << VRN_SHIFT),
    4.10  		     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
    4.11 @@ -268,18 +267,15 @@ vmx_load_all_rr(VCPU *vcpu)
    4.12  		     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
    4.13  	ia64_set_rr((VRN3 << VRN_SHIFT),
    4.14  		     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
    4.15 -#endif
    4.16 -#ifndef XEN_DBL_MAPPING
    4.17 -    extern void * pal_vaddr;
    4.18      ia64_set_rr((VRN5 << VRN_SHIFT),
    4.19              vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
    4.20      ia64_set_rr((VRN6 << VRN_SHIFT),
    4.21              vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
    4.22 +    extern void * pal_vaddr;
    4.23      vmx_switch_rr7(vmx_vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),(void *)vcpu->domain->shared_info,
    4.24                  (void *)vcpu->arch.privregs,
    4.25                  ( void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
    4.26      ia64_set_pta(vcpu->arch.arch_vmx.mpta);
    4.27 -#endif
    4.28  
    4.29  	ia64_srlz_d();
    4.30  	ia64_set_psr(psr);
     5.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Thu Dec 15 16:09:19 2005 -0600
     5.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Thu Dec 15 16:10:22 2005 -0600
     5.3 @@ -227,27 +227,11 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UI
     5.4      VMX(vcpu,vrr[reg>>61]) = val;
     5.5  
     5.6      switch((u64)(reg>>61)) {
     5.7 -#ifdef XEN_DBL_MAPPING
     5.8 -    case VRN5:
     5.9 -        VMX(vcpu,mrr5)=vmx_vrrtomrr(vcpu,val);
    5.10 -        break;
    5.11 -    case VRN6:
    5.12 -        VMX(vcpu,mrr6)=vmx_vrrtomrr(vcpu,val);
    5.13 -        break;
    5.14 -    case VRN7:
    5.15 -        VMX(vcpu,mrr7)=vmx_vrrtomrr(vcpu,val);
    5.16 -        /* Change double mapping for this domain */
    5.17 -        vmx_change_double_mapping(vcpu,
    5.18 -                      vmx_vrrtomrr(vcpu,oldrr.rrval),
    5.19 -                      vmx_vrrtomrr(vcpu,newrr.rrval));
    5.20 -        break;
    5.21 -#else
    5.22      case VRN7:
    5.23         vmx_switch_rr7(vmx_vrrtomrr(vcpu,val),vcpu->domain->shared_info,
    5.24          (void *)vcpu->arch.privregs,
    5.25         ( void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
    5.26         break;
    5.27 -#endif
    5.28      default:
    5.29          ia64_set_rr(reg,vmx_vrrtomrr(vcpu,val));
    5.30          break;
     6.1 --- a/xen/include/asm-ia64/vmx.h	Thu Dec 15 16:09:19 2005 -0600
     6.2 +++ b/xen/include/asm-ia64/vmx.h	Thu Dec 15 16:10:22 2005 -0600
     6.3 @@ -32,13 +32,6 @@ extern void vmx_final_setup_guest(struct
     6.4  extern void vmx_save_state(struct vcpu *v);
     6.5  extern void vmx_load_state(struct vcpu *v);
     6.6  extern void vmx_setup_platform(struct domain *d, struct vcpu_guest_context *c);
     6.7 -#ifdef XEN_DBL_MAPPING
     6.8 -extern vmx_insert_double_mapping(u64,u64,u64,u64,u64);
     6.9 -extern void vmx_purge_double_mapping(u64, u64, u64);
    6.10 -extern void vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7);
    6.11 -extern void vmx_init_double_mapping_stub(void);
    6.12 -#endif
    6.13 -
    6.14  extern void vmx_wait_io(void);
    6.15  extern void vmx_io_assist(struct vcpu *v);
    6.16