ia64/xen-unstable
changeset 10695:6703fed8870f
[IA64] enable acceleration of external interrupt
This patch is to enable acceleration of external interrupt
which is described in the VTI spec.
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
This patch is to enable acceleration of external interrupt
which is described in the VTI spec.
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author | awilliam@xenbuild.aw |
---|---|
date | Wed Jul 12 13:20:15 2006 -0600 (2006-07-12) |
parents | 79a5833d1266 |
children | 000789c36d28 |
files | xen/arch/ia64/vmx/pal_emul.c xen/arch/ia64/vmx/vlsapic.c xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/vmx/vmx_virt.c xen/include/asm-ia64/vmx_vcpu.h |
line diff
1.1 --- a/xen/arch/ia64/vmx/pal_emul.c Tue Jul 11 12:56:27 2006 -0600 1.2 +++ b/xen/arch/ia64/vmx/pal_emul.c Wed Jul 12 13:20:15 2006 -0600 1.3 @@ -146,7 +146,7 @@ static struct ia64_pal_retval 1.4 pal_halt_light(VCPU *vcpu) { 1.5 struct ia64_pal_retval result; 1.6 1.7 - if (SPURIOUS_VECTOR == vmx_check_pending_irq(vcpu)) 1.8 + if (!is_unmasked_irq(vcpu)) 1.9 do_sched_op_compat(SCHEDOP_block, 0); 1.10 1.11 INIT_PAL_STATUS_SUCCESS(result);
2.1 --- a/xen/arch/ia64/vmx/vlsapic.c Tue Jul 11 12:56:27 2006 -0600 2.2 +++ b/xen/arch/ia64/vmx/vlsapic.c Wed Jul 12 13:20:15 2006 -0600 2.3 @@ -289,7 +289,7 @@ static void update_vhpi(VCPU *vcpu, int 2.4 vhpi = 16; 2.5 } 2.6 else { 2.7 - vhpi = vec / 16; 2.8 + vhpi = vec >> 4; 2.9 } 2.10 2.11 VCPU(vcpu,vhpi) = vhpi; 2.12 @@ -436,7 +436,7 @@ static int highest_inservice_irq(VCPU *v 2.13 */ 2.14 static int is_higher_irq(int pending, int inservice) 2.15 { 2.16 - return ( (pending >> 4) > (inservice>>4) || 2.17 + return ( (pending > inservice) || 2.18 ((pending != NULL_VECTOR) && (inservice == NULL_VECTOR)) ); 2.19 } 2.20 2.21 @@ -460,7 +460,6 @@ static int 2.22 _xirq_masked(VCPU *vcpu, int h_pending, int h_inservice) 2.23 { 2.24 tpr_t vtpr; 2.25 - uint64_t mmi; 2.26 2.27 vtpr.val = VCPU(vcpu, tpr); 2.28 2.29 @@ -474,9 +473,9 @@ static int 2.30 if ( h_inservice == ExtINT_VECTOR ) { 2.31 return IRQ_MASKED_BY_INSVC; 2.32 } 2.33 - mmi = vtpr.mmi; 2.34 + 2.35 if ( h_pending == ExtINT_VECTOR ) { 2.36 - if ( mmi ) { 2.37 + if ( vtpr.mmi ) { 2.38 // mask all external IRQ 2.39 return IRQ_MASKED_BY_VTPR; 2.40 } 2.41 @@ -486,7 +485,7 @@ static int 2.42 } 2.43 2.44 if ( is_higher_irq(h_pending, h_inservice) ) { 2.45 - if ( !mmi && is_higher_class(h_pending, vtpr.mic) ) { 2.46 + if ( is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)) ) { 2.47 return IRQ_NO_MASKED; 2.48 } 2.49 else { 2.50 @@ -577,6 +576,8 @@ int vmx_check_pending_irq(VCPU *vcpu) 2.51 isr = vpsr.val & IA64_PSR_RI; 2.52 if ( !vpsr.ic ) 2.53 panic_domain(regs,"Interrupt when IC=0\n"); 2.54 + if (VCPU(vcpu, vhpi)) 2.55 + update_vhpi(vcpu, NULL_VECTOR); 2.56 vmx_reflect_interruption(0,isr,0, 12, regs ); // EXT IRQ 2.57 } 2.58 else if ( mask == IRQ_MASKED_BY_INSVC ) { 2.59 @@ -612,6 +613,20 @@ void guest_write_eoi(VCPU *vcpu) 2.60 // vmx_check_pending_irq(vcpu); 2.61 } 2.62 2.63 +int is_unmasked_irq(VCPU *vcpu) 2.64 +{ 2.65 + int h_pending, h_inservice; 2.66 + 2.67 + h_pending = highest_pending_irq(vcpu); 2.68 + 
h_inservice = highest_inservice_irq(vcpu); 2.69 + if ( h_pending == NULL_VECTOR || 2.70 + irq_masked(vcpu, h_pending, h_inservice) != IRQ_NO_MASKED ) { 2.71 + return 0; 2.72 + } 2.73 + else 2.74 + return 1; 2.75 +} 2.76 + 2.77 uint64_t guest_read_vivr(VCPU *vcpu) 2.78 { 2.79 int vec, h_inservice; 2.80 @@ -628,7 +643,8 @@ uint64_t guest_read_vivr(VCPU *vcpu) 2.81 2.82 VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63)); 2.83 VCPU(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63)); 2.84 - update_vhpi(vcpu, NULL_VECTOR); // clear VHPI till EOI or IRR write 2.85 + if (VCPU(vcpu, vhpi)) 2.86 + update_vhpi(vcpu, NULL_VECTOR); // clear VHPI till EOI or IRR write 2.87 local_irq_restore(spsr); 2.88 return (uint64_t)vec; 2.89 }
3.1 --- a/xen/arch/ia64/vmx/vmx_init.c Tue Jul 11 12:56:27 2006 -0600 3.2 +++ b/xen/arch/ia64/vmx/vmx_init.c Wed Jul 12 13:20:15 2006 -0600 3.3 @@ -183,7 +183,8 @@ static vpd_t *alloc_vpd(void) 3.4 mregs->vac.a_from_cpuid = 1; 3.5 mregs->vac.a_cover = 1; 3.6 mregs->vac.a_bsw = 1; 3.7 - 3.8 + mregs->vac.a_int = 1; 3.9 + 3.10 mregs->vdc.d_vmsw = 1; 3.11 3.12 return vpd;
4.1 --- a/xen/arch/ia64/vmx/vmx_process.c Tue Jul 11 12:56:27 2006 -0600 4.2 +++ b/xen/arch/ia64/vmx/vmx_process.c Wed Jul 12 13:20:15 2006 -0600 4.3 @@ -35,7 +35,7 @@ 4.4 #include <asm/io.h> 4.5 #include <asm/processor.h> 4.6 #include <asm/desc.h> 4.7 -//#include <asm/ldt.h> 4.8 +#include <asm/vlsapic.h> 4.9 #include <xen/irq.h> 4.10 #include <xen/event.h> 4.11 #include <asm/regionreg.h> 4.12 @@ -188,13 +188,13 @@ void leave_hypervisor_tail(struct pt_reg 4.13 struct vcpu *v = current; 4.14 // FIXME: Will this work properly if doing an RFI??? 4.15 if (!is_idle_domain(d) ) { // always comes from guest 4.16 - extern void vmx_dorfirfi(void); 4.17 - struct pt_regs *user_regs = vcpu_regs(current); 4.18 +// struct pt_regs *user_regs = vcpu_regs(current); 4.19 + local_irq_enable(); 4.20 do_softirq(); 4.21 local_irq_disable(); 4.22 4.23 - if (user_regs != regs) 4.24 - printk("WARNING: checking pending interrupt in nested interrupt!!!\n"); 4.25 +// if (user_regs != regs) 4.26 +// printk("WARNING: checking pending interrupt in nested interrupt!!!\n"); 4.27 4.28 /* VMX Domain N has other interrupt source, saying DM */ 4.29 if (test_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags)) 4.30 @@ -215,12 +215,18 @@ void leave_hypervisor_tail(struct pt_reg 4.31 4.32 if ( v->arch.irq_new_pending ) { 4.33 v->arch.irq_new_pending = 0; 4.34 + v->arch.irq_new_condition = 0; 4.35 vmx_check_pending_irq(v); 4.36 + return; 4.37 } 4.38 -// if (VCPU(v,vac).a_bsw){ 4.39 -// save_banked_regs_to_vpd(v,regs); 4.40 -// } 4.41 - 4.42 + if (VCPU(v, vac).a_int) { 4.43 + vhpi_detection(v); 4.44 + return; 4.45 + } 4.46 + if (v->arch.irq_new_condition) { 4.47 + v->arch.irq_new_condition = 0; 4.48 + vhpi_detection(v); 4.49 + } 4.50 } 4.51 } 4.52
5.1 --- a/xen/arch/ia64/vmx/vmx_virt.c Tue Jul 11 12:56:27 2006 -0600 5.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c Wed Jul 12 13:20:15 2006 -0600 5.3 @@ -30,7 +30,6 @@ 5.4 #include <asm/vmx.h> 5.5 #include <asm/virt_event.h> 5.6 #include <asm/vmx_phy_mode.h> 5.7 -extern void vhpi_detection(VCPU *vcpu);//temporarily place here,need a header file. 5.8 5.9 void 5.10 ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, UINT64 * cause) 5.11 @@ -1342,14 +1341,6 @@ IA64FAULT vmx_emul_mov_from_cr(VCPU *vcp 5.12 } 5.13 5.14 5.15 -static void post_emulation_action(VCPU *vcpu) 5.16 -{ 5.17 - if ( vcpu->arch.irq_new_condition ) { 5.18 - vcpu->arch.irq_new_condition = 0; 5.19 - vhpi_detection(vcpu); 5.20 - } 5.21 -} 5.22 - 5.23 //#define BYPASS_VMAL_OPCODE 5.24 extern IA64_SLOT_TYPE slot_types[0x20][3]; 5.25 IA64_BUNDLE __vmx_get_domain_bundle(u64 iip) 5.26 @@ -1552,8 +1543,6 @@ if ( (cause == 0xff && opcode == 0x1e000 5.27 } 5.28 5.29 recover_if_physical_mode(vcpu); 5.30 - post_emulation_action (vcpu); 5.31 -//TODO set_irq_check(v); 5.32 return; 5.33 5.34 }
6.1 --- a/xen/include/asm-ia64/vmx_vcpu.h Tue Jul 11 12:56:27 2006 -0600 6.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h Wed Jul 12 13:20:15 2006 -0600 6.3 @@ -103,6 +103,7 @@ extern void vtm_interruption_update(VCPU 6.4 extern void vlsapic_reset(VCPU *vcpu); 6.5 extern int vmx_check_pending_irq(VCPU *vcpu); 6.6 extern void guest_write_eoi(VCPU *vcpu); 6.7 +extern int is_unmasked_irq(VCPU *vcpu); 6.8 extern uint64_t guest_read_vivr(VCPU *vcpu); 6.9 extern void vmx_inject_vhpi(VCPU *vcpu, u8 vec); 6.10 extern int vmx_vcpu_pend_interrupt(VCPU *vcpu, uint8_t vector);