ia64/xen-unstable
changeset 16348:4fd6610949f1
vt-d: Support intra-domain shared interrupt.
Inter-domain shared interrupts have been supported via a timeout method,
but intra-domain sharing was not yet handled, that is, assigning multiple
devices which share a physical IRQ to the same domain. This patch
implements intra-domain shared interrupt support. In addition, the patch
maps the PCI link to the guest device/INTx in pt_irq_create_bind_vtd()
instead of directly mapping the ISA IRQ, because at this point the ISA
IRQs obtained from pci_link are always 0.
Note that assigning multiple devices to a guest which uses the PIC to
handle interrupts may fail, because different links can connect to the
same IRQ.
Signed-off-by: Weidong Han <weidong.han@intel.com>
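
The core idea of the patch is a per-machine-IRQ reference count: when a shared physical IRQ fires, the virtual INTx of every bound device is asserted and `pending` is bumped once per device, and the physical IRQ is only EOIed after the guest has EOIed all of them. The standalone sketch below models just that bookkeeping; the type and function names are illustrative, not the hypervisor's API.

```c
#include <assert.h>
#include <stdio.h>

/* Toy model: one machine IRQ shared by several passed-through devices. */
struct shared_mirq {
    int pending;   /* guest EOIs still outstanding */
    int ndevs;     /* devices bound to this machine IRQ */
};

/* Interrupt arrives: assert the virtual INTx of every bound device and
 * remember how many guest EOIs must be seen before releasing the line. */
static void dirq_assist(struct shared_mirq *m)
{
    for ( int i = 0; i < m->ndevs; i++ )
        m->pending++;   /* one hvm_pci_intx_assert() per device in the real code */
}

/* Guest EOI for one of the devices: only the last one releases the
 * physical IRQ (pirq_guest_eoi() in the real code). */
static void dpci_eoi(struct shared_mirq *m)
{
    if ( --m->pending == 0 )
        printf("last EOI -> EOI physical IRQ\n");
    else
        printf("still %d EOI(s) outstanding\n", m->pending);
}

int main(void)
{
    struct shared_mirq m = { .pending = 0, .ndevs = 2 };

    dirq_assist(&m);   /* both devices' vINTx asserted, pending == 2 */
    dpci_eoi(&m);      /* first guest EOI: physical IRQ stays held */
    dpci_eoi(&m);      /* second guest EOI: physical IRQ released */
    assert(m.pending == 0);
    return 0;
}
```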
| author   | Keir Fraser <keir@xensource.com> |
|----------|----------------------------------|
| date     | Wed Nov 07 15:20:06 2007 +0000 (2007-11-07) |
| parents  | 644e7577f6ee |
| children | 4f1363491a77 |
| files    | xen/arch/x86/hvm/irq.c xen/arch/x86/hvm/vmx/intr.c xen/arch/x86/hvm/vmx/vtd/io.c xen/include/asm-x86/hvm/irq.h |
line diff
```diff
--- a/xen/arch/x86/hvm/irq.c	Wed Nov 07 14:53:32 2007 +0000
+++ b/xen/arch/x86/hvm/irq.c	Wed Nov 07 15:20:06 2007 +0000
@@ -192,15 +192,12 @@ void hvm_set_pci_link_route(struct domai
     hvm_irq->pci_link.route[link] = isa_irq;
 
     /* PCI pass-through fixup. */
-    if ( hvm_irq->dpci && hvm_irq->dpci->girq[old_isa_irq].valid )
+    if ( hvm_irq->dpci && hvm_irq->dpci->link[link].valid )
     {
-        uint32_t device = hvm_irq->dpci->girq[old_isa_irq].device;
-        uint32_t intx = hvm_irq->dpci->girq[old_isa_irq].intx;
-        if ( link == hvm_pci_intx_link(device, intx) )
-        {
-            hvm_irq->dpci->girq[isa_irq] = hvm_irq->dpci->girq[old_isa_irq];
+        hvm_irq->dpci->girq[isa_irq] = hvm_irq->dpci->link[link];
+        if ( hvm_irq->dpci->girq[old_isa_irq].device ==
+             hvm_irq->dpci->link[link].device )
             hvm_irq->dpci->girq[old_isa_irq].valid = 0;
-        }
     }
 
     if ( hvm_irq->pci_link_assert_count[link] == 0 )
```
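
For context on why the old fixup was insufficient: identifying the device via girq[old_isa_irq] assumes at most one device per routed ISA IRQ, but the PCI INTx swizzle folds many device/INTx pairs onto only four links. The macros below are copied from xen/include/asm-x86/hvm/irq.h of this era (verify against your tree; treat them as illustrative if the definitions have drifted); the demo shows two devices landing on the same link.

```c
#include <stdio.h>

/* As in xen/include/asm-x86/hvm/irq.h (same era as this changeset). */
#define hvm_pci_intx_gsi(dev, intx) \
    (((((dev) << 2) + ((dev) >> 3) + (intx)) & 31) + 16)
#define hvm_pci_intx_link(dev, intx) \
    (((dev) + (intx)) & 3)

int main(void)
{
    /* Devices 2 and 6, both using INTA#, land on the same link: if the
     * guest routes interrupts through the PIC, they also share whatever
     * ISA IRQ that link is routed to. */
    printf("dev 2 INTA -> link %d, gsi %d\n",
           hvm_pci_intx_link(2, 0), hvm_pci_intx_gsi(2, 0));
    printf("dev 6 INTA -> link %d, gsi %d\n",
           hvm_pci_intx_link(6, 0), hvm_pci_intx_gsi(6, 0));
    return 0;
}
```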
```diff
--- a/xen/arch/x86/hvm/vmx/intr.c	Wed Nov 07 14:53:32 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/intr.c	Wed Nov 07 15:20:06 2007 +0000
@@ -113,6 +113,7 @@ static void vmx_dirq_assist(struct vcpu
     uint32_t device, intx;
     struct domain *d = v->domain;
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    struct dev_intx_gsi *dig;
 
     if ( !vtd_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
         return;
@@ -122,11 +123,17 @@ static void vmx_dirq_assist(struct vcpu
           irq = find_next_bit(hvm_irq_dpci->dirq_mask, NR_IRQS, irq + 1) )
     {
         stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)]);
+        clear_bit(irq, &hvm_irq_dpci->dirq_mask);
 
-        test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask);
-        device = hvm_irq_dpci->mirq[irq].device;
-        intx = hvm_irq_dpci->mirq[irq].intx;
-        hvm_pci_intx_assert(d, device, intx);
+        list_for_each_entry ( dig, &hvm_irq_dpci->mirq[irq].dig_list, list )
+        {
+            device = dig->device;
+            intx = dig->intx;
+            hvm_pci_intx_assert(d, device, intx);
+            spin_lock(&hvm_irq_dpci->dirq_lock);
+            hvm_irq_dpci->mirq[irq].pending++;
+            spin_unlock(&hvm_irq_dpci->dirq_lock);
+        }
 
         /*
          * Set a timer to see if the guest can finish the interrupt or not. For
```
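
vmx_dirq_assist() now fans one machine IRQ out to every dev_intx_gsi on dig_list using the Linux-style intrusive list iterator from xen/include/xen/list.h. It is paraphrased below for reference; this is a sketch of the semantics, not the exact Xen source.

```c
#include <stddef.h>

struct list_head {
    struct list_head *next, *prev;
};

/* Recover the enclosing structure from a pointer to its embedded node. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Walk every entry on the list; 'member' names the embedded list_head.
 * This is how vmx_dirq_assist() visits each dev_intx_gsi bound to one
 * machine IRQ. */
#define list_for_each_entry(pos, head, member)                            \
    for ( (pos) = container_of((head)->next, typeof(*(pos)), member);     \
          &(pos)->member != (head);                                       \
          (pos) = container_of((pos)->member.next, typeof(*(pos)), member) )
```

Each visited entry both asserts a virtual INTx and increments pending, which is exactly what hvm_dpci_eoi() later counts back down.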
```diff
--- a/xen/arch/x86/hvm/vmx/vtd/io.c	Wed Nov 07 14:53:32 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vtd/io.c	Wed Nov 07 15:20:06 2007 +0000
@@ -47,14 +47,27 @@
 
 static void pt_irq_time_out(void *data)
 {
-    struct hvm_irq_dpci_mapping *irq_map = data;
-    unsigned int guest_gsi, machine_gsi;
-    struct domain *d = irq_map->dom;
+    struct hvm_mirq_dpci_mapping *irq_map = data;
+    unsigned int guest_gsi, machine_gsi = 0;
+    struct hvm_irq_dpci *dpci = irq_map->dom->arch.hvm_domain.irq.dpci;
+    struct dev_intx_gsi *dig;
+    uint32_t device, intx;
 
-    guest_gsi = irq_map->guest_gsi;
-    machine_gsi = d->arch.hvm_domain.irq.dpci->girq[guest_gsi].machine_gsi;
-    clear_bit(machine_gsi, d->arch.hvm_domain.irq.dpci->dirq_mask);
-    hvm_dpci_eoi(irq_map->dom, guest_gsi, NULL);
+    list_for_each_entry ( dig, &irq_map->dig_list, list )
+    {
+        guest_gsi = dig->gsi;
+        machine_gsi = dpci->girq[guest_gsi].machine_gsi;
+        device = dig->device;
+        intx = dig->intx;
+        hvm_pci_intx_deassert(irq_map->dom, device, intx);
+    }
+
+    clear_bit(machine_gsi, dpci->dirq_mask);
+    stop_timer(&dpci->hvm_timer[irq_to_vector(machine_gsi)]);
+    spin_lock(&dpci->dirq_lock);
+    dpci->mirq[machine_gsi].pending = 0;
+    spin_unlock(&dpci->dirq_lock);
+    pirq_guest_eoi(irq_map->dom, machine_gsi);
 }
 
 int pt_irq_create_bind_vtd(
@@ -62,8 +75,8 @@ int pt_irq_create_bind_vtd(
 {
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
     uint32_t machine_gsi, guest_gsi;
-    uint32_t device, intx;
-    uint32_t link, isa_irq;
+    uint32_t device, intx, link;
+    struct dev_intx_gsi *dig;
 
     if ( hvm_irq_dpci == NULL )
     {
@@ -72,6 +85,9 @@ int pt_irq_create_bind_vtd(
             return -ENOMEM;
 
         memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
+        spin_lock_init(&hvm_irq_dpci->dirq_lock);
+        for ( int i = 0; i < NR_IRQS; i++ )
+            INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].dig_list);
 
         if ( cmpxchg((unsigned long *)&d->arch.hvm_domain.irq.dpci,
                      0, (unsigned long)hvm_irq_dpci) != 0 )
@@ -85,35 +101,42 @@ int pt_irq_create_bind_vtd(
     intx = pt_irq_bind->u.pci.intx;
     guest_gsi = hvm_pci_intx_gsi(device, intx);
     link = hvm_pci_intx_link(device, intx);
-    isa_irq = d->arch.hvm_domain.irq.pci_link.route[link];
+
+    dig = xmalloc(struct dev_intx_gsi);
+    if ( !dig )
+        return -ENOMEM;
 
-    hvm_irq_dpci->mirq[machine_gsi].valid = 1;
-    hvm_irq_dpci->mirq[machine_gsi].device = device;
-    hvm_irq_dpci->mirq[machine_gsi].intx = intx;
-    hvm_irq_dpci->mirq[machine_gsi].guest_gsi = guest_gsi;
-    hvm_irq_dpci->mirq[machine_gsi].dom = d;
-
+    dig->device = device;
+    dig->intx = intx;
+    dig->gsi = guest_gsi;
+    list_add_tail(&dig->list,
+                  &hvm_irq_dpci->mirq[machine_gsi].dig_list);
+
     hvm_irq_dpci->girq[guest_gsi].valid = 1;
     hvm_irq_dpci->girq[guest_gsi].device = device;
     hvm_irq_dpci->girq[guest_gsi].intx = intx;
     hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
-    hvm_irq_dpci->girq[guest_gsi].dom = d;
 
-    hvm_irq_dpci->girq[isa_irq].valid = 1;
-    hvm_irq_dpci->girq[isa_irq].device = device;
-    hvm_irq_dpci->girq[isa_irq].intx = intx;
-    hvm_irq_dpci->girq[isa_irq].machine_gsi = machine_gsi;
-    hvm_irq_dpci->girq[isa_irq].dom = d;
+    hvm_irq_dpci->link[link].valid = 1;
+    hvm_irq_dpci->link[link].device = device;
+    hvm_irq_dpci->link[link].intx = intx;
+    hvm_irq_dpci->link[link].machine_gsi = machine_gsi;
 
-    init_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)],
-               pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
+    /* Bind the same mirq once in the same domain */
+    if ( !hvm_irq_dpci->mirq[machine_gsi].valid )
+    {
+        hvm_irq_dpci->mirq[machine_gsi].valid = 1;
+        hvm_irq_dpci->mirq[machine_gsi].dom = d;
 
-    /* Deal with GSI for legacy devices. */
-    pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
-    gdprintk(XENLOG_ERR,
-             "XEN_DOMCTL_irq_mapping: m_irq = %x device = %x intx = %x\n",
+        init_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)],
+                   pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
+        /* Deal with gsi for legacy devices */
+        pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
+    }
+
+    gdprintk(XENLOG_INFO,
+             "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
             machine_gsi, device, intx);
-
     return 0;
 }
 
@@ -150,14 +173,22 @@ void hvm_dpci_eoi(struct domain *d, unsi
         return;
 
     machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
-    stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]);
     device = hvm_irq_dpci->girq[guest_gsi].device;
     intx = hvm_irq_dpci->girq[guest_gsi].intx;
-    gdprintk(XENLOG_INFO, "hvm_dpci_eoi:: device %x intx %x\n",
-             device, intx);
     hvm_pci_intx_deassert(d, device, intx);
-    if ( (ent == NULL) || !ent->fields.mask )
-        pirq_guest_eoi(d, machine_gsi);
+
+    spin_lock(&hvm_irq_dpci->dirq_lock);
+    if ( --hvm_irq_dpci->mirq[machine_gsi].pending == 0 )
+    {
+        spin_unlock(&hvm_irq_dpci->dirq_lock);
+
+        gdprintk(XENLOG_INFO, "hvm_dpci_eoi:: mirq = %x\n", machine_gsi);
+        stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]);
+        if ( (ent == NULL) || !ent->fields.mask )
+            pirq_guest_eoi(d, machine_gsi);
+    }
+    else
+        spin_unlock(&hvm_irq_dpci->dirq_lock);
 }
 
 void iommu_domain_destroy(struct domain *d)
@@ -165,8 +196,9 @@ void iommu_domain_destroy(struct domain
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
     uint32_t i;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
-    struct list_head *ioport_list, *tmp;
+    struct list_head *ioport_list, *dig_list, *tmp;
     struct g2m_ioport *ioport;
+    struct dev_intx_gsi *dig;
 
     if ( !vtd_enabled )
         return;
@@ -178,7 +210,16 @@ void iommu_domain_destroy(struct domain
         {
             pirq_guest_unbind(d, i);
             kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
+
+            list_for_each_safe ( dig_list, tmp,
+                                 &hvm_irq_dpci->mirq[i].dig_list )
+            {
+                dig = list_entry(dig_list, struct dev_intx_gsi, list);
+                list_del(&dig->list);
+                xfree(dig);
+            }
         }
+
     d->arch.hvm_domain.irq.dpci = NULL;
     xfree(hvm_irq_dpci);
 }
```
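
pt_irq_create_bind_vtd() can now be invoked once per device even when several devices resolve to the same machine GSI; only the first call arms the timer and calls pirq_guest_bind(), while later calls merely append another dev_intx_gsi entry. A toy model of that guard follows; all names in it are illustrative, not Xen internals.

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_IRQS 256

struct toy_mirq {
    bool valid;   /* physical IRQ already bound for this domain? */
    int  ndigs;   /* stand-in for the dev_intx_gsi list length */
};

static struct toy_mirq mirq[NR_IRQS];

/* Models the tail of pt_irq_create_bind_vtd(): always record the
 * device/INTx binding, but bind the physical IRQ only once. */
static void bind_pt_irq(unsigned int machine_gsi)
{
    mirq[machine_gsi].ndigs++;   /* list_add_tail(&dig->list, ...) */

    if ( !mirq[machine_gsi].valid )
    {
        mirq[machine_gsi].valid = true;
        printf("gsi %u: init_timer + pirq_guest_bind (first device)\n",
               machine_gsi);
    }
    else
        printf("gsi %u: already bound, just appended entry %d\n",
               machine_gsi, mirq[machine_gsi].ndigs);
}

int main(void)
{
    bind_pt_irq(21);   /* first device on GSI 21: binds the IRQ */
    bind_pt_irq(21);   /* second device sharing GSI 21: list entry only */
    return 0;
}
```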
```diff
--- a/xen/include/asm-x86/hvm/irq.h	Wed Nov 07 14:53:32 2007 +0000
+++ b/xen/include/asm-x86/hvm/irq.h	Wed Nov 07 15:20:06 2007 +0000
@@ -30,22 +30,35 @@
 #include <asm/hvm/vioapic.h>
 #include <public/hvm/save.h>
 
-struct hvm_irq_dpci_mapping {
+struct dev_intx_gsi {
+    struct list_head list;
+    uint8_t device;
+    uint8_t intx;
+    uint8_t gsi;
+};
+
+struct hvm_mirq_dpci_mapping {
+    uint8_t valid;
+    int pending;
+    struct list_head dig_list;
+    struct domain *dom;
+};
+
+struct hvm_girq_dpci_mapping {
     uint8_t valid;
     uint8_t device;
     uint8_t intx;
-    struct domain *dom;
-    union {
-        uint8_t guest_gsi;
-        uint8_t machine_gsi;
-    };
+    uint8_t machine_gsi;
 };
 
 struct hvm_irq_dpci {
+    spinlock_t dirq_lock;
     /* Machine IRQ to guest device/intx mapping. */
-    struct hvm_irq_dpci_mapping mirq[NR_IRQS];
+    struct hvm_mirq_dpci_mapping mirq[NR_IRQS];
     /* Guest IRQ to guest device/intx mapping. */
-    struct hvm_irq_dpci_mapping girq[NR_IRQS];
+    struct hvm_girq_dpci_mapping girq[NR_IRQS];
+    /* Link to guest device/intx mapping. */
+    struct hvm_girq_dpci_mapping link[4];
     DECLARE_BITMAP(dirq_mask, NR_IRQS);
     struct timer hvm_timer[NR_IRQS];
 };
```
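
With the union gone, girq[] is indexed by guest GSI and stores the machine GSI explicitly, so reverse translation is a plain table lookup. A small helper in the spirit of the header above; it is hypothetical and not part of the patch.

```c
/* Hypothetical helper (not in the patch): translate a guest GSI back to
 * its machine GSI via the reworked girq[] table, or return -1 if the
 * guest GSI has no pass-through binding. */
static inline int guest_gsi_to_machine_gsi(const struct hvm_irq_dpci *dpci,
                                           unsigned int guest_gsi)
{
    if ( guest_gsi >= NR_IRQS || !dpci->girq[guest_gsi].valid )
        return -1;
    return dpci->girq[guest_gsi].machine_gsi;
}
```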