ia64/xen-unstable

changeset 9157:a693ccb4d581

[IA64] VTI: fix Oops: time tick before it's due

1. The guest may set itm several times within one execution of its timer
handler. The VMM needs to handle this situation.
2. The VMM doesn't need to stop the guest timer when switching out and
reset it when switching in; that logic may leave room for corner cases
that I haven't identified yet :-), so I simply removed it.
3. When the VMM emulates a write to itv, it can't simply stop the timer
while the guest's timer interrupt is masked.
4. Operations such as reading/writing itv, itc, and itm don't need to
disable interrupts, since there is no conflicting access.

After all these modifications, the VTI domain no longer complains "Oops:
time tick before it's due"; I haven't done full testing yet :-).

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Tue Mar 07 20:01:29 2006 -0700 (2006-03-07)
parents 9215a9a1af9e
children d00fa9827789
files xen/arch/ia64/vmx/vlsapic.c xen/arch/ia64/xen/xenmisc.c xen/include/asm-ia64/vmx_vcpu.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/vlsapic.c	Tue Mar 07 17:14:32 2006 -0700
     1.2 +++ b/xen/arch/ia64/vmx/vlsapic.c	Tue Mar 07 20:01:29 2006 -0700
     1.3 @@ -97,16 +97,15 @@ static void vtm_timer_fn(void *data)
     1.4  {
     1.5      vtime_t *vtm;
     1.6      VCPU    *vcpu = data;
     1.7 -    u64	    cur_itc,vitm;
     1.8 +    u64	    cur_itc,vitv;
     1.9  
    1.10 -    UINT64  vec;
    1.11 -    
    1.12 -    vec = VCPU(vcpu, itv) & 0xff;
    1.13 -    vmx_vcpu_pend_interrupt(vcpu, vec);
    1.14 -
    1.15 +    vitv = VCPU(vcpu, itv);
    1.16 +    if ( !ITV_IRQ_MASK(vitv) ){
    1.17 +        vmx_vcpu_pend_interrupt(vcpu, vitv & 0xff);
    1.18 +    }
    1.19      vtm=&(vcpu->arch.arch_vmx.vtm);
    1.20      cur_itc = now_itc(vtm);
    1.21 -    vitm =VCPU(vcpu, itm);
    1.22 + //    vitm =VCPU(vcpu, itm);
    1.23   //fire_itc2 = cur_itc;
    1.24   //fire_itm2 = vitm;
    1.25      update_last_itc(vtm,cur_itc);  // pseudo read to update vITC
    1.26 @@ -135,51 +134,72 @@ uint64_t vtm_get_itc(VCPU *vcpu)
    1.27      vtime_t    *vtm;
    1.28  
    1.29      vtm=&(vcpu->arch.arch_vmx.vtm);
    1.30 -    // FIXME: should use local_irq_disable & local_irq_enable ??
    1.31 -    local_irq_save(spsr);
    1.32      guest_itc = now_itc(vtm);
    1.33 -//    update_last_itc(vtm, guest_itc);
    1.34 -
    1.35 -    local_irq_restore(spsr);
    1.36      return guest_itc;
    1.37  }
    1.38  
    1.39 +
    1.40 +
    1.41 +
    1.42  void vtm_set_itc(VCPU *vcpu, uint64_t new_itc)
    1.43  {
    1.44 -    uint64_t    spsr;
    1.45 +    uint64_t    vitm, vitv;
    1.46      vtime_t     *vtm;
    1.47 -
    1.48 +    vitm = VCPU(vcpu,itm);
    1.49 +    vitv = VCPU(vcpu,itv);
    1.50      vtm=&(vcpu->arch.arch_vmx.vtm);
    1.51 -    local_irq_save(spsr);
    1.52      vtm->vtm_offset = new_itc - ia64_get_itc();
    1.53      vtm->last_itc = new_itc;
    1.54 -    vtm_interruption_update(vcpu, vtm);
    1.55 -    local_irq_restore(spsr);
    1.56 +    if(vitm < new_itc){
    1.57 +        clear_bit(ITV_VECTOR(vitv), &VCPU(vcpu, irr[0]));
    1.58 +        stop_timer(&vtm->vtm_timer);
    1.59 +    }
    1.60  }
    1.61  
    1.62 -void vtm_set_itv(VCPU *vcpu)
    1.63 +
    1.64 +#define TIMER_SLOP (50*1000) /* ns */  /* copy from timer.c */
    1.65 +extern u64 cycle_to_ns(u64 cyle);
    1.66 +
    1.67 +
    1.68 +void vtm_set_itm(VCPU *vcpu, uint64_t val)
    1.69  {
    1.70 -    uint64_t    spsr,itv;
    1.71 -    vtime_t     *vtm;
    1.72 -
    1.73 +    vtime_t *vtm;
    1.74 +    uint64_t   vitv, cur_itc, expires;
    1.75 +    vitv = VCPU(vcpu, itv);
    1.76      vtm=&(vcpu->arch.arch_vmx.vtm);
    1.77 -    local_irq_save(spsr);
    1.78 -    itv = VCPU(vcpu, itv);
    1.79 -    if ( ITV_IRQ_MASK(itv) )
    1.80 +    // TODO; need to handle VHPI in future
    1.81 +    clear_bit(ITV_VECTOR(vitv), &VCPU(vcpu, irr[0]));
    1.82 +    VCPU(vcpu,itm)=val;
    1.83 +    cur_itc =now_itc(vtm);
    1.84 +    if(val >  vtm->last_itc){
    1.85 +        expires = NOW() + cycle_to_ns(val-cur_itc) + TIMER_SLOP;
    1.86 +        set_timer(&vtm->vtm_timer, expires);
    1.87 +    }else{
    1.88          stop_timer(&vtm->vtm_timer);
    1.89 -    vtm_interruption_update(vcpu, vtm);
    1.90 -    local_irq_restore(spsr);
    1.91 +    }
    1.92 +}
    1.93 +
    1.94 +
    1.95 +void vtm_set_itv(VCPU *vcpu, uint64_t val)
    1.96 +{
    1.97 +    uint64_t    olditv;
    1.98 +    olditv = VCPU(vcpu, itv);
    1.99 +    VCPU(vcpu, itv) = val;
   1.100 +    if(ITV_IRQ_MASK(val)){
   1.101 +        clear_bit(ITV_VECTOR(olditv), &VCPU(vcpu, irr[0]));
   1.102 +    }else if(ITV_VECTOR(olditv)!=ITV_VECTOR(val)){
   1.103 +        if(test_and_clear_bit(ITV_VECTOR(olditv), &VCPU(vcpu, irr[0])))
   1.104 +            set_bit(ITV_VECTOR(val), &VCPU(vcpu, irr[0]));
   1.105 +    }
   1.106  }
   1.107  
   1.108  
   1.109  /*
   1.110 - * Update interrupt or hook the vtm timer for fire 
   1.111 + * Update interrupt or hook the vtm timer for fire
   1.112   * At this point vtm_timer should be removed if itv is masked.
   1.113   */
   1.114  /* Interrupt must be disabled at this point */
   1.115 -
   1.116 -extern u64 cycle_to_ns(u64 cyle);
   1.117 -#define TIMER_SLOP (50*1000) /* ns */  /* copy from timer.c */
   1.118 +/*
   1.119  void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm)
   1.120  {
   1.121      uint64_t    cur_itc,vitm,vitv;
   1.122 @@ -197,8 +217,7 @@ void vtm_interruption_update(VCPU *vcpu,
   1.123      cur_itc =now_itc(vtm);
   1.124      diff_last = vtm->last_itc - vitm;
   1.125      diff_now = cur_itc - vitm;
   1.126 -    update_last_itc (vtm,cur_itc);
   1.127 -    
   1.128 +
   1.129      if ( diff_last >= 0 ) {
   1.130          // interrupt already fired.
   1.131          stop_timer(&vtm->vtm_timer);
   1.132 @@ -207,28 +226,32 @@ void vtm_interruption_update(VCPU *vcpu,
   1.133          // ITV is fired.
   1.134          vmx_vcpu_pend_interrupt(vcpu, vitv&0xff);
   1.135      }
   1.136 +*/
   1.137      /* Both last_itc & cur_itc < itm, wait for fire condition */
   1.138 -    else {
   1.139 +/*    else {
   1.140          expires = NOW() + cycle_to_ns(0-diff_now) + TIMER_SLOP;
   1.141          set_timer(&vtm->vtm_timer, expires);
   1.142      }
   1.143      local_irq_restore(spsr);
   1.144  }
   1.145 + */
   1.146  
   1.147  /*
   1.148   * Action for vtm when the domain is scheduled out.
   1.149   * Remove the timer for vtm.
   1.150   */
   1.151 +/*
   1.152  void vtm_domain_out(VCPU *vcpu)
   1.153  {
   1.154      if(!is_idle_domain(vcpu->domain))
   1.155  	stop_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
   1.156  }
   1.157 -
   1.158 + */
   1.159  /*
   1.160   * Action for vtm when the domain is scheduled in.
   1.161   * Fire vtm IRQ or add the timer for vtm.
   1.162   */
   1.163 +/*
   1.164  void vtm_domain_in(VCPU *vcpu)
   1.165  {
   1.166      vtime_t     *vtm;
   1.167 @@ -238,6 +261,7 @@ void vtm_domain_in(VCPU *vcpu)
   1.168  	vtm_interruption_update(vcpu, vtm);
   1.169      }
   1.170  }
   1.171 + */
   1.172  
   1.173  /*
   1.174   * Next for vLSapic
     2.1 --- a/xen/arch/ia64/xen/xenmisc.c	Tue Mar 07 17:14:32 2006 -0700
     2.2 +++ b/xen/arch/ia64/xen/xenmisc.c	Tue Mar 07 20:01:29 2006 -0700
     2.3 @@ -306,9 +306,9 @@ void context_switch(struct vcpu *prev, s
     2.4      uint64_t pta;
     2.5  
     2.6      local_irq_save(spsr);
     2.7 -    if(VMX_DOMAIN(prev)){
     2.8 -    	vtm_domain_out(prev);
     2.9 -    }
    2.10 +//    if(VMX_DOMAIN(prev)){
    2.11 +//    	vtm_domain_out(prev);
    2.12 +//    }
    2.13  	context_switch_count++;
    2.14  	switch_to(prev,next,prev);
    2.15  //    if(VMX_DOMAIN(current)){
    2.16 @@ -326,7 +326,7 @@ if (!i--) { printk("+"); i = 1000000; }
    2.17  }
    2.18  
    2.19      if (VMX_DOMAIN(current)){
    2.20 -        vtm_domain_in(current);
    2.21 +//        vtm_domain_in(current);
    2.22  		vmx_load_all_rr(current);
    2.23      }else{
    2.24      	extern char ia64_ivt;
     3.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Tue Mar 07 17:14:32 2006 -0700
     3.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Tue Mar 07 20:01:29 2006 -0700
     3.3 @@ -102,10 +102,11 @@ extern IA64FAULT vmx_vcpu_set_psr_l(VCPU
     3.4  extern void vtm_init(VCPU *vcpu);
     3.5  extern uint64_t vtm_get_itc(VCPU *vcpu);
     3.6  extern void vtm_set_itc(VCPU *vcpu, uint64_t new_itc);
     3.7 -extern void vtm_set_itv(VCPU *vcpu);
     3.8 +extern void vtm_set_itv(VCPU *vcpu, uint64_t val);
     3.9 +extern void vtm_set_itm(VCPU *vcpu, uint64_t val);
    3.10  extern void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm);
    3.11 -extern void vtm_domain_out(VCPU *vcpu);
    3.12 -extern void vtm_domain_in(VCPU *vcpu);
    3.13 +//extern void vtm_domain_out(VCPU *vcpu);
    3.14 +//extern void vtm_domain_in(VCPU *vcpu);
    3.15  extern void vlsapic_reset(VCPU *vcpu);
    3.16  extern int vmx_check_pending_irq(VCPU *vcpu);
    3.17  extern void guest_write_eoi(VCPU *vcpu);
    3.18 @@ -255,10 +256,7 @@ static inline
    3.19  IA64FAULT
    3.20  vmx_vcpu_set_itm(VCPU *vcpu, u64 val)
    3.21  {
    3.22 -    vtime_t     *vtm;
    3.23 -    vtm=&(vcpu->arch.arch_vmx.vtm);
    3.24 -    VCPU(vcpu,itm)=val;
    3.25 -    vtm_interruption_update(vcpu, vtm);
    3.26 +    vtm_set_itm(vcpu, val);
    3.27      return IA64_NO_FAULT;
    3.28  }
    3.29  static inline
    3.30 @@ -299,8 +297,7 @@ IA64FAULT
    3.31  vmx_vcpu_set_itv(VCPU *vcpu, u64 val)
    3.32  {
    3.33  
    3.34 -    VCPU(vcpu,itv)=val;
    3.35 -    vtm_set_itv(vcpu);
    3.36 +    vtm_set_itv(vcpu, val);
    3.37      return IA64_NO_FAULT;
    3.38  }
    3.39  static inline