ia64/xen-unstable
changeset 16160:e733e6b73d56
vt-d: Allow pass-through of shared interrupts.
Signed-off-by: Xiaohui Xin <xiaohui.xin@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author | Keir Fraser <keir@xensource.com> |
---|---|
date | Fri Oct 19 11:26:58 2007 +0100 (2007-10-19) |
parents | bc4afcd4c612 |
children | bf3514726c1b |
files | xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c xen/arch/x86/hvm/vioapic.c xen/arch/x86/hvm/vmx/intr.c xen/arch/x86/hvm/vmx/vtd/dmar.c xen/arch/x86/hvm/vmx/vtd/io.c xen/arch/x86/hvm/vpic.c xen/include/asm-x86/hvm/io.h xen/include/asm-x86/hvm/irq.h xen/include/asm-x86/iommu.h |
line diff
1.1 --- a/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c Fri Oct 19 09:31:03 2007 +0100 1.2 +++ b/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c Fri Oct 19 11:26:58 2007 +0100 1.3 @@ -18,10 +18,10 @@ 1.4 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 1.5 */ 1.6 1.7 +#include <xen/sched.h> 1.8 #include <asm/hvm/iommu.h> 1.9 #include <asm/amd-iommu.h> 1.10 #include <asm/hvm/svm/amd-iommu-proto.h> 1.11 -#include <xen/sched.h> 1.12 1.13 extern long amd_iommu_poll_comp_wait; 1.14
2.1 --- a/xen/arch/x86/hvm/vioapic.c Fri Oct 19 09:31:03 2007 +0100 2.2 +++ b/xen/arch/x86/hvm/vioapic.c Fri Oct 19 11:26:58 2007 +0100 2.3 @@ -459,7 +459,7 @@ void vioapic_update_EOI(struct domain *d 2.4 ent->fields.remote_irr = 0; 2.5 2.6 if ( vtd_enabled ) 2.7 - hvm_dpci_eoi(gsi, ent); 2.8 + hvm_dpci_eoi(current->domain, gsi, ent); 2.9 2.10 if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) && 2.11 !ent->fields.mask &&
3.1 --- a/xen/arch/x86/hvm/vmx/intr.c Fri Oct 19 09:31:03 2007 +0100 3.2 +++ b/xen/arch/x86/hvm/vmx/intr.c Fri Oct 19 11:26:58 2007 +0100 3.3 @@ -121,10 +121,22 @@ static void vmx_dirq_assist(struct vcpu 3.4 irq < NR_IRQS; 3.5 irq = find_next_bit(hvm_irq_dpci->dirq_mask, NR_IRQS, irq + 1) ) 3.6 { 3.7 + stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)]); 3.8 + 3.9 test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask); 3.10 device = hvm_irq_dpci->mirq[irq].device; 3.11 intx = hvm_irq_dpci->mirq[irq].intx; 3.12 hvm_pci_intx_assert(d, device, intx); 3.13 + 3.14 + /* 3.15 + * Set a timer to see if the guest can finish the interrupt or not. For 3.16 + * example, the guest OS may unmask the PIC during boot, before the 3.17 + * guest driver is loaded. hvm_pci_intx_assert() may succeed, but the 3.18 + * guest will never deal with the irq, then the physical interrupt line 3.19 + * will never be deasserted. 3.20 + */ 3.21 + set_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)], 3.22 + NOW() + PT_IRQ_TIME_OUT); 3.23 } 3.24 } 3.25
4.1 --- a/xen/arch/x86/hvm/vmx/vtd/dmar.c Fri Oct 19 09:31:03 2007 +0100 4.2 +++ b/xen/arch/x86/hvm/vmx/vtd/dmar.c Fri Oct 19 11:26:58 2007 +0100 4.3 @@ -492,7 +492,6 @@ acpi_parse_dmar(unsigned long phys_addr, 4.4 4.5 int acpi_dmar_init(void) 4.6 { 4.7 - extern int ioapic_ack_new; 4.8 int rc; 4.9 4.10 if (!vtd_enabled) 4.11 @@ -509,8 +508,5 @@ int acpi_dmar_init(void) 4.12 return -ENODEV; 4.13 } 4.14 4.15 - /* Use fake-vector style of IOAPIC acknowledgement. */ 4.16 - ioapic_ack_new = 0; 4.17 - 4.18 return 0; 4.19 }
5.1 --- a/xen/arch/x86/hvm/vmx/vtd/io.c Fri Oct 19 09:31:03 2007 +0100 5.2 +++ b/xen/arch/x86/hvm/vmx/vtd/io.c Fri Oct 19 11:26:58 2007 +0100 5.3 @@ -45,6 +45,18 @@ 5.4 #include <public/hvm/ioreq.h> 5.5 #include <public/domctl.h> 5.6 5.7 +static void pt_irq_time_out(void *data) 5.8 +{ 5.9 + struct hvm_irq_dpci_mapping *irq_map = data; 5.10 + unsigned int guest_gsi, machine_gsi; 5.11 + struct domain *d = irq_map->dom; 5.12 + 5.13 + guest_gsi = irq_map->guest_gsi; 5.14 + machine_gsi = d->arch.hvm_domain.irq.dpci->girq[guest_gsi].machine_gsi; 5.15 + clear_bit(machine_gsi, d->arch.hvm_domain.irq.dpci->dirq_mask); 5.16 + hvm_dpci_eoi(irq_map->dom, guest_gsi, NULL); 5.17 +} 5.18 + 5.19 int pt_irq_create_bind_vtd( 5.20 struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind) 5.21 { 5.22 @@ -76,17 +88,22 @@ int pt_irq_create_bind_vtd( 5.23 hvm_irq_dpci->mirq[machine_gsi].device = device; 5.24 hvm_irq_dpci->mirq[machine_gsi].intx = intx; 5.25 hvm_irq_dpci->mirq[machine_gsi].guest_gsi = guest_gsi; 5.26 + hvm_irq_dpci->mirq[machine_gsi].dom = d; 5.27 5.28 hvm_irq_dpci->girq[guest_gsi].valid = 1; 5.29 hvm_irq_dpci->girq[guest_gsi].device = device; 5.30 hvm_irq_dpci->girq[guest_gsi].intx = intx; 5.31 hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi; 5.32 + hvm_irq_dpci->girq[guest_gsi].dom = d; 5.33 5.34 - /* Deal with gsi for legacy devices */ 5.35 + init_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)], 5.36 + pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0); 5.37 5.38 + /* Deal with GSI for legacy devices. */ 5.39 pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE); 5.40 gdprintk(XENLOG_ERR, 5.41 - "XEN_DOMCTL_irq_mapping: m_irq = %x device = %x intx = %x\n", 5.42 - machine_gsi, device, intx); 5.43 + "XEN_DOMCTL_irq_mapping: m_irq = %x device = %x intx = %x\n", 5.44 + machine_gsi, device, intx); 5.45 5.46 return 0; 5.47 } 5.48 @@ -114,22 +131,25 @@ int hvm_do_IRQ_dpci(struct domain *d, un 5.49 hvm_irq->dpci->girq[isa_irq].machine_gsi = mirq; 5.50 } 5.51 5.52 - if ( !test_and_set_bit(mirq, hvm_irq->dpci->dirq_mask) ) 5.53 - { 5.54 - vcpu_kick(d->vcpu[0]); 5.55 - return 1; 5.56 - } 5.57 + /* 5.58 + * Set a timer here to avoid situations where the IRQ line is shared, and 5.59 + * the device belonging to the pass-through guest is not yet active. In 5.60 + * this case the guest may not pick up the interrupt (e.g., masked at the 5.61 + * PIC) and we need to detect that. 5.62 + */ 5.63 + set_bit(mirq, hvm_irq->dpci->dirq_mask); 5.64 + set_timer(&hvm_irq->dpci->hvm_timer[irq_to_vector(mirq)], 5.65 + NOW() + PT_IRQ_TIME_OUT); 5.66 + vcpu_kick(d->vcpu[0]); 5.67 5.68 - dprintk(XENLOG_INFO, "mirq already pending\n"); 5.69 - return 0; 5.70 + return 1; 5.71 } 5.72 5.73 -void hvm_dpci_eoi(unsigned int guest_gsi, union vioapic_redir_entry *ent) 5.74 +void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi, 5.75 + union vioapic_redir_entry *ent) 5.76 { 5.77 - struct domain *d = current->domain; 5.78 struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci; 5.79 uint32_t device, intx, machine_gsi; 5.80 - irq_desc_t *desc; 5.81 5.82 ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock)); 5.83 5.84 @@ -137,17 +157,15 @@ void hvm_dpci_eoi(unsigned int guest_gsi 5.85 !hvm_irq_dpci->girq[guest_gsi].valid ) 5.86 return; 5.87 5.88 + machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi; 5.89 + stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]); 5.90 device = hvm_irq_dpci->girq[guest_gsi].device; 5.91 intx = hvm_irq_dpci->girq[guest_gsi].intx; 5.92 - machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi; 5.93 gdprintk(XENLOG_INFO, "hvm_dpci_eoi:: device %x intx %x\n", 5.94 device, intx); 5.95 __hvm_pci_intx_deassert(d, device, intx); 5.96 - if ( (ent == NULL) || (ent->fields.mask == 0) ) 5.97 - { 5.98 - desc = &irq_desc[irq_to_vector(machine_gsi)]; 5.99 - desc->handler->end(irq_to_vector(machine_gsi)); 5.100 - } 5.101 + if ( (ent == NULL) || !ent->fields.mask ) 5.102 + pirq_guest_eoi(d, machine_gsi); 5.103 } 5.104 5.105 void iommu_domain_destroy(struct domain *d)
6.1 --- a/xen/arch/x86/hvm/vpic.c Fri Oct 19 09:31:03 2007 +0100 6.2 +++ b/xen/arch/x86/hvm/vpic.c Fri Oct 19 11:26:58 2007 +0100 6.3 @@ -252,7 +252,8 @@ static void vpic_ioport_write( 6.4 if ( vtd_enabled ) 6.5 { 6.6 irq |= ((addr & 0xa0) == 0xa0) ? 8 : 0; 6.7 - hvm_dpci_eoi(hvm_isa_irq_to_gsi(irq), NULL); 6.8 + hvm_dpci_eoi(current->domain, 6.9 + hvm_isa_irq_to_gsi(irq), NULL); 6.10 } 6.11 break; 6.12 case 6: /* Set Priority */
7.1 --- a/xen/include/asm-x86/hvm/io.h Fri Oct 19 09:31:03 2007 +0100 7.2 +++ b/xen/include/asm-x86/hvm/io.h Fri Oct 19 11:26:58 2007 +0100 7.3 @@ -151,7 +151,8 @@ void send_invalidate_req(void); 7.4 extern void handle_mmio(unsigned long gpa); 7.5 extern void hvm_interrupt_post(struct vcpu *v, int vector, int type); 7.6 extern void hvm_io_assist(void); 7.7 -extern void hvm_dpci_eoi(unsigned int guest_irq, union vioapic_redir_entry *ent); 7.8 +extern void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq, 7.9 + union vioapic_redir_entry *ent); 7.10 7.11 #endif /* __ASM_X86_HVM_IO_H__ */ 7.12
8.1 --- a/xen/include/asm-x86/hvm/irq.h Fri Oct 19 09:31:03 2007 +0100 8.2 +++ b/xen/include/asm-x86/hvm/irq.h Fri Oct 19 11:26:58 2007 +0100 8.3 @@ -33,6 +33,7 @@ struct hvm_irq_dpci_mapping { 8.4 uint8_t valid; 8.5 uint8_t device; 8.6 uint8_t intx; 8.7 + struct domain *dom; 8.8 union { 8.9 uint8_t guest_gsi; 8.10 uint8_t machine_gsi; 8.11 @@ -45,6 +46,7 @@ struct hvm_irq_dpci { 8.12 /* Guest IRQ to guest device/intx mapping. */ 8.13 struct hvm_irq_dpci_mapping girq[NR_IRQS]; 8.14 DECLARE_BITMAP(dirq_mask, NR_IRQS); 8.15 + struct timer hvm_timer[NR_IRQS]; 8.16 }; 8.17 8.18 struct hvm_irq {
9.1 --- a/xen/include/asm-x86/iommu.h Fri Oct 19 09:31:03 2007 +0100 9.2 +++ b/xen/include/asm-x86/iommu.h Fri Oct 19 11:26:58 2007 +0100 9.3 @@ -79,7 +79,8 @@ void iommu_domain_teardown(struct domain 9.4 int hvm_do_IRQ_dpci(struct domain *d, unsigned int irq); 9.5 int dpci_ioport_intercept(ioreq_t *p); 9.6 int pt_irq_create_bind_vtd(struct domain *d, 9.7 - xen_domctl_bind_pt_irq_t * pt_irq_bind); 9.8 + xen_domctl_bind_pt_irq_t *pt_irq_bind); 9.9 9.10 +#define PT_IRQ_TIME_OUT MILLISECS(8) 9.11 9.12 -#endif // _IOMMU_H_ 9.13 +#endif /* _IOMMU_H_ */