ia64/xen-unstable
changeset 11828:d5a46e4cc340
[IA64] Fix Windows Timer stop issue.
When doing HCT testing on guest Windows, the Windows timer might stop
if there is no mouse activity.
When implementing VHPI acceleration, I didn't consider the following
situation: Windows uses the "epc" instruction to implement system
calls, like Linux. In a very small code sequence including the "epc"
instruction, if an external interrupt arrives, Windows defers handling
it by executing rfi with ipsr.i=0.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author   | awilliam@xenbuild.aw
date     | Sun Oct 22 14:39:15 2006 -0600 (2006-10-22)
parents  | a7c6b1c5507c
children | 6492b9b27968
files    | xen/arch/ia64/vmx/vlsapic.c xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/xen/xentime.c
line diff
--- a/xen/arch/ia64/vmx/vlsapic.c Sun Oct 22 14:14:58 2006 -0600
+++ b/xen/arch/ia64/vmx/vlsapic.c Sun Oct 22 14:39:15 2006 -0600
@@ -57,6 +57,59 @@ static void update_last_itc(vtime_t *vtm
 }
 
 /*
+ * Next for vLSapic
+ */
+
+#define NMI_VECTOR      2
+#define ExtINT_VECTOR   0
+#define NULL_VECTOR     -1
+
+static void update_vhpi(VCPU *vcpu, int vec)
+{
+    u64 vhpi;
+
+    if (vec == NULL_VECTOR)
+        vhpi = 0;
+    else if (vec == NMI_VECTOR)
+        vhpi = 32;
+    else if (vec == ExtINT_VECTOR)
+        vhpi = 16;
+    else
+        vhpi = vec >> 4;
+
+    VCPU(vcpu,vhpi) = vhpi;
+    // TODO: Add support for XENO
+    if (VCPU(vcpu,vac).a_int)
+        ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
+                      (uint64_t)vcpu->arch.privregs, 0, 0, 0, 0, 0, 0);
+}
+
+
+/*
+ * May come from virtualization fault or
+ * nested host interrupt.
+ */
+static int vmx_vcpu_unpend_interrupt(VCPU *vcpu, uint8_t vector)
+{
+    uint64_t spsr;
+    int ret;
+
+    if (vector & ~0xff) {
+        DPRINTK("vmx_vcpu_pend_interrupt: bad vector\n");
+        return -1;
+    }
+
+    local_irq_save(spsr);
+    ret = test_and_clear_bit(vector, &VCPU(vcpu, irr[0]));
+    local_irq_restore(spsr);
+
+    if (ret)
+        vcpu->arch.irq_new_pending = 1;
+
+    return ret;
+}
+
+/*
  * ITC value saw in guest (host+offset+drift).
  */
 static uint64_t now_itc(vtime_t *vtm)
@@ -107,9 +160,6 @@ static void vtm_timer_fn(void *data)
     }
     vtm=&(vcpu->arch.arch_vmx.vtm);
     cur_itc = now_itc(vtm);
-    // vitm =VCPU(vcpu, itm);
-    //fire_itc2 = cur_itc;
-    //fire_itm2 = vitm;
     update_last_itc(vtm,cur_itc);  // pseudo read to update vITC
 }
 
@@ -137,6 +187,7 @@ uint64_t vtm_get_itc(VCPU *vcpu)
 
     vtm=&(vcpu->arch.arch_vmx.vtm);
     guest_itc = now_itc(vtm);
+    update_last_itc(vtm, guest_itc);  // update vITC
     return guest_itc;
 }
 
@@ -158,7 +209,7 @@ void vtm_set_itc(VCPU *vcpu, uint64_t ne
         vtm->last_itc = new_itc;
     }
     if(vitm < new_itc){
-        clear_bit(ITV_VECTOR(vitv), &VCPU(vcpu, irr[0]));
+        vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(vitv));
         stop_timer(&vtm->vtm_timer);
     }
 }
@@ -175,12 +226,12 @@ void vtm_set_itm(VCPU *vcpu, uint64_t va
     vitv = VCPU(vcpu, itv);
     vtm=&(vcpu->arch.arch_vmx.vtm);
     // TODO; need to handle VHPI in future
-    clear_bit(ITV_VECTOR(vitv), &VCPU(vcpu, irr[0]));
+    vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(vitv));
     VCPU(vcpu,itm)=val;
-    cur_itc =now_itc(vtm);
-    if(time_before(val, cur_itc))
-        val = cur_itc;
-    if(val > vtm->last_itc){
+    if (val >= vtm->last_itc) {
+        cur_itc = now_itc(vtm);
+        if (time_before(val, cur_itc))
+            val = cur_itc;
         expires = NOW() + cycle_to_ns(val-cur_itc) + TIMER_SLOP;
         set_timer(&vtm->vtm_timer, expires);
     }else{
@@ -195,10 +246,10 @@ void vtm_set_itv(VCPU *vcpu, uint64_t va
     olditv = VCPU(vcpu, itv);
     VCPU(vcpu, itv) = val;
     if(ITV_IRQ_MASK(val)){
-        clear_bit(ITV_VECTOR(olditv), &VCPU(vcpu, irr[0]));
+        vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(olditv));
     }else if(ITV_VECTOR(olditv)!=ITV_VECTOR(val)){
-        if(test_and_clear_bit(ITV_VECTOR(olditv), &VCPU(vcpu, irr[0])))
-            set_bit(ITV_VECTOR(val), &VCPU(vcpu, irr[0]));
+        if (vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(olditv)))
+            vmx_vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
     }
 }
 
@@ -272,36 +323,6 @@ void vtm_domain_in(VCPU *vcpu)
 }
 */
 
-/*
- * Next for vLSapic
- */
-
-#define NMI_VECTOR      2
-#define ExtINT_VECTOR   0
-#define NULL_VECTOR     -1
-static void update_vhpi(VCPU *vcpu, int vec)
-{
-    u64 vhpi;
-    if ( vec == NULL_VECTOR ) {
-        vhpi = 0;
-    }
-    else if ( vec == NMI_VECTOR ) {    // NMI
-        vhpi = 32;
-    } else if (vec == ExtINT_VECTOR) { //ExtINT
-        vhpi = 16;
-    }
-    else {
-        vhpi = vec >> 4;
-    }
-
-    VCPU(vcpu,vhpi) = vhpi;
-    // TODO: Add support for XENO
-    if ( VCPU(vcpu,vac).a_int ) {
-        ia64_call_vsa ( PAL_VPS_SET_PENDING_INTERRUPT,
-                        (uint64_t)vcpu->arch.privregs, 0, 0, 0, 0, 0, 0);
-    }
-}
-
 #ifdef V_IOSAPIC_READY
 /* Assist to check virtual interrupt lines */
 void vmx_virq_line_assist(struct vcpu *v)
@@ -524,10 +545,14 @@ int vmx_vcpu_pend_interrupt(VCPU *vcpu,
     local_irq_save(spsr);
     ret = test_and_set_bit(vector, &VCPU(vcpu, irr[0]));
     local_irq_restore(spsr);
-    vcpu->arch.irq_new_pending = 1;
+
+    if (!ret)
+        vcpu->arch.irq_new_pending = 1;
+
     return ret;
 }
 
+
 /*
  * Add batch of pending interrupt.
  * The interrupt source is contained in pend_irr[0-3] with
@@ -559,14 +584,13 @@ void vmx_vcpu_pend_batch_interrupt(VCPU
  */
 int vmx_check_pending_irq(VCPU *vcpu)
 {
-    uint64_t spsr, mask;
-    int h_pending, h_inservice;
-    uint64_t isr;
-    IA64_PSR vpsr;
+    int mask, h_pending, h_inservice;
+    uint64_t isr;
+    IA64_PSR vpsr;
     REGS *regs=vcpu_regs(vcpu);
-    local_irq_save(spsr);
     h_pending = highest_pending_irq(vcpu);
     if ( h_pending == NULL_VECTOR ) {
+        update_vhpi(vcpu, NULL_VECTOR);
         h_pending = SPURIOUS_VECTOR;
         goto chk_irq_exit;
     }
@@ -578,13 +602,11 @@ int vmx_check_pending_irq(VCPU *vcpu)
         isr = vpsr.val & IA64_PSR_RI;
         if ( !vpsr.ic )
             panic_domain(regs,"Interrupt when IC=0\n");
+        update_vhpi(vcpu, h_pending);
+        vmx_reflect_interruption(0, isr, 0, 12, regs); // EXT IRQ
+    } else if (mask == IRQ_MASKED_BY_INSVC) {
         if (VCPU(vcpu, vhpi))
             update_vhpi(vcpu, NULL_VECTOR);
-        vmx_reflect_interruption(0,isr,0, 12, regs ); // EXT IRQ
-    }
-    else if ( mask == IRQ_MASKED_BY_INSVC ) {
-        // cann't inject VHPI
-//        DPRINTK("IRQ masked by higher inservice\n");
     }
     else {
         // masked by vpsr.i or vtpr.
@@ -592,7 +614,6 @@ int vmx_check_pending_irq(VCPU *vcpu)
     }
 
 chk_irq_exit:
-    local_irq_restore(spsr);
     return h_pending;
 }
 
@@ -602,17 +623,13 @@ chk_irq_exit:
 void guest_write_eoi(VCPU *vcpu)
 {
     int vec;
-    uint64_t spsr;
 
     vec = highest_inservice_irq(vcpu);
     if ( vec == NULL_VECTOR )
-        panic_domain(vcpu_regs(vcpu),"Wrong vector to EOI\n");
-    local_irq_save(spsr);
+        panic_domain(vcpu_regs(vcpu), "Wrong vector to EOI\n");
     VLSAPIC_INSVC(vcpu,vec>>6) &= ~(1UL <<(vec&63));
-    local_irq_restore(spsr);
     VCPU(vcpu, eoi)=0;    // overwrite the data
     vcpu->arch.irq_new_pending=1;
-//    vmx_check_pending_irq(vcpu);
 }
 
 int is_unmasked_irq(VCPU *vcpu)
@@ -631,23 +648,21 @@ int is_unmasked_irq(VCPU *vcpu)
 
 uint64_t guest_read_vivr(VCPU *vcpu)
 {
-    int vec, h_inservice;
-    uint64_t spsr;
-
-    local_irq_save(spsr);
+    int vec, h_inservice, mask;
     vec = highest_pending_irq(vcpu);
     h_inservice = highest_inservice_irq(vcpu);
-    if ( vec == NULL_VECTOR ||
-        irq_masked(vcpu, vec, h_inservice) != IRQ_NO_MASKED ) {
-        local_irq_restore(spsr);
+    mask = irq_masked(vcpu, vec, h_inservice);
+    if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
+        if (VCPU(vcpu, vhpi))
+            update_vhpi(vcpu, NULL_VECTOR);
         return IA64_SPURIOUS_INT_VECTOR;
     }
-
+    if (mask == IRQ_MASKED_BY_VTPR) {
+        update_vhpi(vcpu, vec);
+        return IA64_SPURIOUS_INT_VECTOR;
+    }
     VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63));
-    VCPU(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63));
-    if (VCPU(vcpu, vhpi))
-        update_vhpi(vcpu, NULL_VECTOR); // clear VHPI till EOI or IRR write
-    local_irq_restore(spsr);
+    vmx_vcpu_unpend_interrupt(vcpu, vec);
     return (uint64_t)vec;
 }
 
@@ -657,7 +672,6 @@ static void generate_exirq(VCPU *vcpu)
     uint64_t isr;
     REGS *regs=vcpu_regs(vcpu);
     vpsr.val = VCPU(vcpu, vpsr);
-    update_vhpi(vcpu, NULL_VECTOR);
     isr = vpsr.val & IA64_PSR_RI;
     if ( !vpsr.ic )
         panic_domain(regs,"Interrupt when IC=0\n");
@@ -669,7 +683,6 @@ void vhpi_detection(VCPU *vcpu)
     uint64_t threshold,vhpi;
     tpr_t vtpr;
    IA64_PSR vpsr;
-
     vpsr.val = VCPU(vcpu, vpsr);
     vtpr.val = VCPU(vcpu, tpr);
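
The vector-to-VHPI encoding that update_vhpi() applies above is small enough to model on its own. Below is a minimal, hypothetical stand-alone sketch in host-compilable C; encode_vhpi() and the printf harness are ours for illustration, but the constants and the mapping come straight from the hunk.

#include <stdio.h>
#include <stdint.h>

/* Constants as defined in the diff above. */
#define NMI_VECTOR      2
#define ExtINT_VECTOR   0
#define NULL_VECTOR     -1

/* Hypothetical stand-alone version of the mapping in update_vhpi(). */
static uint64_t encode_vhpi(int vec)
{
    if (vec == NULL_VECTOR)
        return 0;                  /* nothing pending: clear VHPI */
    if (vec == NMI_VECTOR)
        return 32;                 /* NMI gets a dedicated encoding */
    if (vec == ExtINT_VECTOR)
        return 16;                 /* ExtINT likewise */
    return (uint64_t)vec >> 4;     /* ordinary vectors: priority class */
}

int main(void)
{
    /* e.g. a timer vector of 0xef lands in priority class 0xe */
    printf("vec 0xef -> vhpi %llu\n", (unsigned long long)encode_vhpi(0xef));
    printf("vec NMI  -> vhpi %llu\n", (unsigned long long)encode_vhpi(NMI_VECTOR));
    return 0;
}

The point of the hunks above appears to be that this encoded value is now refreshed whenever the pending set changes (update_vhpi(vcpu, h_pending) in vmx_check_pending_irq, update_vhpi(vcpu, vec) in guest_read_vivr) rather than only being cleared, so a guest that deferred an interrupt with ipsr.i=0 can still discover the pending vector through VHPI.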
--- a/xen/arch/ia64/vmx/vmx_process.c Sun Oct 22 14:14:58 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c Sun Oct 22 14:39:15 2006 -0600
@@ -242,10 +242,7 @@ void leave_hypervisor_tail(struct pt_reg
         vmx_check_pending_irq(v);
         return;
     }
-    if (VCPU(v, vac).a_int) {
-        vhpi_detection(v);
-        return;
-    }
+
     if (v->arch.irq_new_condition) {
         v->arch.irq_new_condition = 0;
         vhpi_detection(v);
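
A minimal model of the control flow left in leave_hypervisor_tail() after this hunk: the unconditional vac.a_int fast path is gone, so vhpi_detection() runs only when one of the two "something changed" flags was raised. The types, stubs, and flag-clearing details here are stand-ins for illustration, not Xen's own.

#include <stdio.h>

/* Stand-in for the relevant bits of struct vcpu; not Xen's type. */
struct vcpu_model {
    int irq_new_pending;    /* set when the IRR changed (pend/unpend) */
    int irq_new_condition;  /* set when masking state changed (e.g. TPR) */
};

static void model_check_pending_irq(struct vcpu_model *v) { (void)v; puts("check pending IRQ"); }
static void model_vhpi_detection(struct vcpu_model *v)    { (void)v; puts("re-evaluate VHPI"); }

static void leave_tail_model(struct vcpu_model *v)
{
    if (v->irq_new_pending) {
        v->irq_new_pending = 0;     /* flag handling simplified here */
        model_check_pending_irq(v);
        return;
    }
    if (v->irq_new_condition) {
        v->irq_new_condition = 0;
        model_vhpi_detection(v);
    }
}

int main(void)
{
    struct vcpu_model v = { .irq_new_pending = 1 };
    leave_tail_model(&v);           /* prints "check pending IRQ" */
    return 0;
}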
--- a/xen/arch/ia64/xen/xentime.c Sun Oct 22 14:14:58 2006 -0600
+++ b/xen/arch/ia64/xen/xentime.c Sun Oct 22 14:39:15 2006 -0600
@@ -109,7 +109,6 @@ void
 xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 {
     unsigned long new_itm, old_itc;
-    int f_setitm = 0;
 
 #if 0
 #define HEARTBEAT_FREQ 16  // period in seconds
@@ -125,20 +124,9 @@ xen_timer_interrupt (int irq, void *dev_
 #endif
 #endif
 
-    if (!is_idle_domain(current->domain)&&!VMX_DOMAIN(current))
-        if (vcpu_timer_expired(current)) {
-            vcpu_pend_timer(current);
-            // ensure another timer interrupt happens even if domain doesn't
-            vcpu_set_next_timer(current);
-            f_setitm = 1;
-        }
 
     new_itm = local_cpu_data->itm_next;
-
-    if (f_setitm && !time_after(ia64_get_itc(), new_itm))
-        return;
-
-    while (1) {
+    while (time_after(ia64_get_itc(), new_itm)) {
         new_itm += local_cpu_data->itm_delta;
 
         if (smp_processor_id() == TIME_KEEPER_ID) {
@@ -148,27 +136,32 @@ xen_timer_interrupt (int irq, void *dev_
             * another CPU. We need to avoid to SMP race by acquiring the
             * xtime_lock.
             */
-//#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            write_seqlock(&xtime_lock);
-//#endif
 #ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            do_timer(regs);
 #endif
-            local_cpu_data->itm_next = new_itm;
-
-            /* Updates system time (nanoseconds since boot). */
+            /* Updates system time (nanoseconds since boot). */
            old_itc = itc_at_irq;
            itc_at_irq = ia64_get_itc();
            stime_irq += cycle_to_ns(itc_at_irq - old_itc);
 
-//#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            write_sequnlock(&xtime_lock);
-//#endif
-        } else
-            local_cpu_data->itm_next = new_itm;
+        }
+
+        local_cpu_data->itm_next = new_itm;
+
+    }
 
-        if (time_after(new_itm, ia64_get_itc()))
-            break;
+    if (!is_idle_domain(current->domain) && !VMX_DOMAIN(current)) {
+        if (vcpu_timer_expired(current)) {
+            vcpu_pend_timer(current);
+        } else {
+            // ensure another timer interrupt happens
+            // even if domain doesn't
+            vcpu_set_next_timer(current);
+            raise_softirq(TIMER_SOFTIRQ);
+            return;
+        }
     }
 
     do {
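
The core of this rewrite is the catch-up loop: itm_next is advanced in itm_delta steps until it is in the future again, replacing the old break-out-of-while(1) form. A hypothetical stand-alone model in C is below; catch_up() is ours, time_after() mirrors the kernel macro (true if a is later than b, wraparound-safe via signed comparison), and the per-step bookkeeping the real loop does under xtime_lock is omitted.

#include <stdio.h>
#include <stdint.h>

/* Wraparound-safe "a is after b", as in the kernel's time_after(). */
static int time_after(uint64_t a, uint64_t b)
{
    return (int64_t)(b - a) < 0;
}

/* Advance itm_next one delta per missed tick until it is in the future. */
static uint64_t catch_up(uint64_t itc_now, uint64_t itm_next, uint64_t itm_delta)
{
    while (time_after(itc_now, itm_next))
        itm_next += itm_delta;
    return itm_next;
}

int main(void)
{
    /* Three ticks were missed: 750 -> 850 -> 950 -> 1050, now past itc. */
    printf("next itm: %llu\n",
           (unsigned long long)catch_up(1000, 750, 100)); /* prints 1050 */
    return 0;
}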