direct-io.hg

changeset 12431:4816a891b3d6

[IA64] Fix SMP Windows boot failure

Sometimes SMP Windows fails to boot; the root cause is that a guest
timer interrupt is lost.

This patch fixes the following issues:
1. Windows uses a different way to synchronize the itc.
2. Previously, when the guest timer fired while the guest ITV was
   masked, XEN would discard the guest timer interrupt. That is not
   correct for Windows, which may still expect the interrupt; it is now
   latched as pending and delivered once the ITV is unmasked (see the
   sketch after this list).
3. Windows may use a different sequence to set the timer in some
   situations: it first sets itm (which may be smaller than the current
   itc), and then sets itc (to a value smaller than itm).
   XEN now supports this way of setting the timer.
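
As an illustration of item 2, here is a minimal, self-contained C
sketch of the pending-latch behaviour this patch introduces. The
ITV_IRQ_MASK/ITV_VECTOR macros and pend_interrupt() stub below are
simplified stand-ins, not the actual Xen definitions:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* Simplified stand-ins for the Xen/IA64 definitions (assumed). */
    #define ITV_IRQ_MASK(itv)  ((itv) & (1UL << 16))  /* ITV mask bit */
    #define ITV_VECTOR(itv)    ((itv) & 0xffUL)       /* vector field */

    typedef struct { uint64_t pending; } vtime_t;

    static void pend_interrupt(uint64_t vec)
    {
        printf("inject vector 0x%" PRIx64 "\n", vec);
    }

    /* Timer expiry: if the ITV is masked, latch the interrupt as
     * pending instead of discarding it (previously it was dropped). */
    static void timer_fn(vtime_t *vtm, uint64_t vitv)
    {
        if (!ITV_IRQ_MASK(vitv))
            pend_interrupt(ITV_VECTOR(vitv));
        else
            vtm->pending = 1;
    }

    /* Guest rewrites the ITV: deliver a latched interrupt on unmask. */
    static void set_itv(vtime_t *vtm, uint64_t val)
    {
        if (!ITV_IRQ_MASK(val) && vtm->pending) {
            pend_interrupt(ITV_VECTOR(val));
            vtm->pending = 0;
        }
    }

    int main(void)
    {
        vtime_t vtm = { 0 };
        timer_fn(&vtm, (1UL << 16) | 0xef);  /* fires while masked: latched */
        set_itv(&vtm, 0xef);                 /* unmasked: delivered now */
        return 0;
    }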

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Fri Nov 10 11:19:57 2006 -0700 (2006-11-10)
parents 51be39239c47
children 371d2837a1fe
files xen/arch/ia64/asm-offsets.c xen/arch/ia64/vmx/optvfault.S xen/arch/ia64/vmx/vlsapic.c xen/include/asm-ia64/vtm.h
--- a/xen/arch/ia64/asm-offsets.c	Fri Nov 10 11:19:51 2006 -0700
+++ b/xen/arch/ia64/asm-offsets.c	Fri Nov 10 11:19:57 2006 -0700
@@ -38,6 +38,7 @@ void foo(void)
 
 	BLANK();
 	DEFINE(VCPU_VTM_OFFSET_OFS, offsetof(struct vcpu, arch.arch_vmx.vtm.vtm_offset));
+	DEFINE(VCPU_VTM_LAST_ITC_OFS, offsetof(struct vcpu, arch.arch_vmx.vtm.last_itc));
 	DEFINE(VCPU_VRR0_OFS, offsetof(struct vcpu, arch.arch_vmx.vrr[0]));
 #ifdef   VTI_DEBUG
 	DEFINE(IVT_CUR_OFS, offsetof(struct vcpu, arch.arch_vmx.ivt_current));
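
For context, asm-offsets.c exports structure offsets as assembler
constants; the new VCPU_VTM_LAST_ITC_OFS lets the hand-written fast
path in optvfault.S (next hunk) reach last_itc directly. A rough sketch
of the mechanism, with simplified stand-in types (the exact DEFINE
marker format is an assumption; the real struct layout is Xen's):

    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-in for the nested Xen structures. */
    typedef struct { long vtm_offset; uint64_t last_itc; } vtime_t;
    struct vcpu { vtime_t vtm; };

    /* Each DEFINE() emits a marker that the build system extracts from
     * the generated assembly to produce asm-offsets.h for the .S files. */
    #define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    void foo(void)
    {
        DEFINE(VCPU_VTM_OFFSET_OFS, offsetof(struct vcpu, vtm.vtm_offset));
        DEFINE(VCPU_VTM_LAST_ITC_OFS, offsetof(struct vcpu, vtm.last_itc));
    }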
--- a/xen/arch/ia64/vmx/optvfault.S	Fri Nov 10 11:19:51 2006 -0700
+++ b/xen/arch/ia64/vmx/optvfault.S	Fri Nov 10 11:19:57 2006 -0700
@@ -29,17 +29,22 @@ GLOBAL_ENTRY(vmx_asm_mov_from_ar)
     br.many vmx_virtualization_fault_back
 #endif
     add r18=VCPU_VTM_OFFSET_OFS,r21
-    mov r19=ar.itc
+    add r16=VCPU_VTM_LAST_ITC_OFS,r21
     extr.u r17=r25,6,7
     ;;
     ld8 r18=[r18]
+    mov r19=ar.itc
+    mov r24=b0
+    ;;
+    ld8 r16=[r16]
+    add r19=r19,r18
     movl r20=asm_mov_to_reg
     ;;
     adds r30=vmx_resume_to_guest-asm_mov_to_reg,r20
     shladd r17=r17,4,r20
-    mov r24=b0
+    cmp.gtu p6,p0=r16,r19
     ;;
-    add r19=r19,r18
+    (p6) mov r19=r16
     mov b0=r17
     br.sptk.few b0
     ;;
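
In C terms, the patched fast path for a guest mov rX=ar.itc now
computes roughly the following (a sketch of the logic only; the
register names in the comments refer to the .S hunk above):

    #include <stdint.h>
    #include <stdio.h>

    /* Guest ITC = host ITC + vtm_offset, clamped so the value returned
     * to the guest never runs backwards past last_itc (e.g. after the
     * vCPU migrates between physical CPUs). */
    static uint64_t read_guest_itc(uint64_t host_itc, uint64_t vtm_offset,
                                   uint64_t last_itc)
    {
        uint64_t itc = host_itc + vtm_offset;  /* add r19=r19,r18 */
        if (last_itc > itc)                    /* cmp.gtu p6,p0=r16,r19 */
            itc = last_itc;                    /* (p6) mov r19=r16 */
        return itc;
    }

    int main(void)
    {
        /* Raw value lagging behind last_itc is hidden by the clamp. */
        printf("%llu\n", (unsigned long long)read_guest_itc(100, 0, 200));
        return 0;
    }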
--- a/xen/arch/ia64/vmx/vlsapic.c	Fri Nov 10 11:19:51 2006 -0700
+++ b/xen/arch/ia64/vmx/vlsapic.c	Fri Nov 10 11:19:57 2006 -0700
@@ -119,14 +119,11 @@ static uint64_t now_itc(vtime_t *vtm)
         if ( vtm->vtm_local_drift ) {
 //          guest_itc -= vtm->vtm_local_drift;
         }
-        if ( (long)(guest_itc - vtm->last_itc) > 0 ) {
+        if (guest_itc >= vtm->last_itc)
             return guest_itc;
-
-        }
-        else {
+        else
             /* guest ITC backwarded due after LP switch */
             return vtm->last_itc;
-        }
 }
 
 /*
@@ -134,33 +131,42 @@ static uint64_t now_itc(vtime_t *vtm)
  */
 static void vtm_reset(VCPU *vcpu)
 {
-    uint64_t    cur_itc;
-    vtime_t     *vtm;
-
-    vtm=&(vcpu->arch.arch_vmx.vtm);
-    vtm->vtm_offset = 0;
+    int i;
+    u64 vtm_offset;
+    VCPU *v;
+    struct domain *d = vcpu->domain;
+    vtime_t *vtm = &VMX(vcpu, vtm);
+
+    if (vcpu->vcpu_id == 0) {
+        vtm_offset = 0UL - ia64_get_itc();
+        for (i = MAX_VIRT_CPUS - 1; i >= 0; i--) {
+            if ((v = d->vcpu[i]) != NULL) {
+                VMX(v, vtm).vtm_offset = vtm_offset;
+                VMX(v, vtm).last_itc = 0;
+            }
+        }
+    }
     vtm->vtm_local_drift = 0;
     VCPU(vcpu, itm) = 0;
     VCPU(vcpu, itv) = 0x10000;
-    cur_itc = ia64_get_itc();
-    vtm->last_itc = vtm->vtm_offset + cur_itc;
+    vtm->last_itc = 0;
 }
 
 /* callback function when vtm_timer expires */
 static void vtm_timer_fn(void *data)
 {
-    vtime_t *vtm;
-    VCPU    *vcpu = data;
-    u64	    cur_itc,vitv;
+    VCPU *vcpu = data;
+    vtime_t *vtm = &VMX(vcpu, vtm);
+    u64 vitv;
 
     vitv = VCPU(vcpu, itv);
-    if ( !ITV_IRQ_MASK(vitv) ){
-        vmx_vcpu_pend_interrupt(vcpu, vitv & 0xff);
+    if (!ITV_IRQ_MASK(vitv)) {
+        vmx_vcpu_pend_interrupt(vcpu, ITV_VECTOR(vitv));
         vcpu_unblock(vcpu);
-    }
-    vtm=&(vcpu->arch.arch_vmx.vtm);
-    cur_itc = now_itc(vtm);
-    update_last_itc(vtm,cur_itc);  // pseudo read to update vITC
+    } else
+        vtm->pending = 1;
+
+    update_last_itc(vtm, VCPU(vcpu, itm));  // update vITC
 }
 
 void vtm_init(VCPU *vcpu)
@@ -168,7 +174,7 @@ void vtm_init(VCPU *vcpu)
     vtime_t     *vtm;
     uint64_t    itc_freq;
 
-    vtm=&(vcpu->arch.arch_vmx.vtm);
+    vtm = &VMX(vcpu, vtm);
 
     itc_freq = local_cpu_data->itc_freq;
     vtm->cfg_max_jump=itc_freq*MAX_JUMP_STEP/1000;
@@ -182,36 +188,38 @@ void vtm_init(VCPU *vcpu)
  */
 uint64_t vtm_get_itc(VCPU *vcpu)
 {
-    uint64_t    guest_itc;
-    vtime_t    *vtm;
+    uint64_t guest_itc;
+    vtime_t *vtm = &VMX(vcpu, vtm);
 
-    vtm=&(vcpu->arch.arch_vmx.vtm);
     guest_itc = now_itc(vtm);
-    update_last_itc(vtm, guest_itc);  // update vITC
     return guest_itc;
 }
 
 
 void vtm_set_itc(VCPU *vcpu, uint64_t new_itc)
 {
-    uint64_t    vitm, vitv;
-    vtime_t     *vtm;
-    vitm = VCPU(vcpu,itm);
-    vitv = VCPU(vcpu,itv);
-    vtm=&(vcpu->arch.arch_vmx.vtm);
-    if(vcpu->vcpu_id == 0){
-        vtm->vtm_offset = new_itc - ia64_get_itc();
-        vtm->last_itc = new_itc;
+    int i;
+    uint64_t vitm, vtm_offset;
+    vtime_t *vtm;
+    VCPU *v;
+    struct domain *d = vcpu->domain;
+
+    vitm = VCPU(vcpu, itm);
+    vtm = &VMX(vcpu, vtm);
+    if (vcpu->vcpu_id == 0) {
+        vtm_offset = new_itc - ia64_get_itc();
+        for (i = MAX_VIRT_CPUS - 1; i >= 0; i--) {
+            if ((v = d->vcpu[i]) != NULL) {
+                VMX(v, vtm).vtm_offset = vtm_offset;
+                VMX(v, vtm).last_itc = 0;
+            }
+        }
     }
-    else{
-        vtm->vtm_offset = vcpu->domain->vcpu[0]->arch.arch_vmx.vtm.vtm_offset;
-        new_itc=vtm->vtm_offset + ia64_get_itc();
-        vtm->last_itc = new_itc;
-    }
-    if(vitm < new_itc){
-        vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(vitv));
+    vtm->last_itc = 0;
+    if (vitm <= new_itc)
         stop_timer(&vtm->vtm_timer);
-    }
+    else
+        vtm_set_itm(vcpu, vitm);
 }
 
 
@@ -223,16 +231,16 @@ void vtm_set_itm(VCPU *vcpu, uint64_t va
 {
     vtime_t *vtm;
     uint64_t   vitv, cur_itc, expires;
+
     vitv = VCPU(vcpu, itv);
-    vtm=&(vcpu->arch.arch_vmx.vtm);
-    // TODO; need to handle VHPI in future
-    vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(vitv));
-    VCPU(vcpu,itm)=val;
-    if (val >= vtm->last_itc) {
+    vtm = &VMX(vcpu, vtm);
+    VCPU(vcpu, itm) = val;
+    if (val > vtm->last_itc) {
         cur_itc = now_itc(vtm);
         if (time_before(val, cur_itc))
             val = cur_itc;
         expires = NOW() + cycle_to_ns(val-cur_itc) + TIMER_SLOP;
+        vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(vitv));
         set_timer(&vtm->vtm_timer, expires);
     }else{
         stop_timer(&vtm->vtm_timer);
@@ -242,14 +250,13 @@ void vtm_set_itm(VCPU *vcpu, uint64_t va
 
 void vtm_set_itv(VCPU *vcpu, uint64_t val)
 {
-    uint64_t    olditv;
-    olditv = VCPU(vcpu, itv);
+    vtime_t *vtm = &VMX(vcpu, vtm);
+
     VCPU(vcpu, itv) = val;
-    if(ITV_IRQ_MASK(val)){
-        vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(olditv));
-    }else if(ITV_VECTOR(olditv)!=ITV_VECTOR(val)){
-        if (vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(olditv)))
-            vmx_vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
+
+    if (!ITV_IRQ_MASK(val) && vtm->pending) {
+        vmx_vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
+        vtm->pending = 0;
     }
 }
 
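
Taken together, the vlsapic.c changes make a vcpu0 ITC write retarget
every vCPU's offset (issue 1) and re-arm an itm that was programmed
before the itc (issue 3). A condensed, hypothetical C sketch of that
control flow, where the stubs stand in for the Xen primitives:

    #include <stdint.h>
    #include <stddef.h>

    #define MAX_VIRT_CPUS 4

    typedef struct { long vtm_offset; uint64_t last_itc; } vtime_t;
    struct vcpu { vtime_t vtm; uint64_t itm; };

    static struct vcpu *vcpus[MAX_VIRT_CPUS];  /* domain's vCPU list */

    static uint64_t ia64_get_itc(void) { return 0; }  /* stub ITC read */
    static void arm_timer(struct vcpu *v, uint64_t itm) { (void)v; (void)itm; }
    static void stop_timer(struct vcpu *v) { (void)v; }

    static void set_itc(struct vcpu *vcpu, int id, uint64_t new_itc)
    {
        if (id == 0) {
            /* vcpu0's write defines the offset for the whole domain,
             * so every guest ITC stays synchronized. */
            long off = (long)(new_itc - ia64_get_itc());
            for (int i = 0; i < MAX_VIRT_CPUS; i++)
                if (vcpus[i]) {
                    vcpus[i]->vtm.vtm_offset = off;
                    vcpus[i]->vtm.last_itc = 0;
                }
        }
        vcpu->vtm.last_itc = 0;
        if (vcpu->itm <= new_itc)
            stop_timer(vcpu);             /* target already passed */
        else
            arm_timer(vcpu, vcpu->itm);   /* itm set before itc: re-arm */
    }

    int main(void)
    {
        struct vcpu v0 = { { 0, 0 }, 1000 };  /* itm programmed first */
        vcpus[0] = &v0;
        set_itc(&v0, 0, 500);                 /* itc below itm: re-arms */
        return 0;
    }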
--- a/xen/include/asm-ia64/vtm.h	Fri Nov 10 11:19:51 2006 -0700
+++ b/xen/include/asm-ia64/vtm.h	Fri Nov 10 11:19:57 2006 -0700
@@ -33,7 +33,8 @@
 typedef struct vtime {
     	long        vtm_offset; // guest ITC = host ITC + vtm_offset
     	uint64_t    vtm_local_drift;
-	uint64_t   last_itc;
+	uint64_t    last_itc;
+    	uint64_t    pending;
     	/* 
     	 * Local drift (temporary) after guest suspension
     	 * In case of long jump amount of ITC after suspension,