ia64/xen-unstable

changeset 16276:007ff84be506

vt-d: Do dpci eoi outside of irq_lock.

Deadlock may occur if hvm_dpci_eoi() is called while holding irq_lock
on an MP platform. For example, with two physical CPUs: an interrupt
is injected on cpu0, but the vcpu has migrated to cpu1, which performs
the EOI while holding irq_lock and therefore issues an IPI to cpu0. At
the same time, cpu0 may have interrupts disabled and be spinning on
the same irq_lock, so neither CPU can make progress. In addition, the
current code cannot guarantee that hvm_dpci_eoi() is called inside
irq_lock on the timeout path. This patch calls hvm_dpci_eoi() outside
of irq_lock, which solves both problems.
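
For illustration, a minimal user-space sketch of the resulting pattern
follows (plain C with pthread mutexes standing in for Xen spinlocks;
dpci_eoi() and vioapic_update_eoi() are hypothetical stand-ins, not the
hypervisor code): release the lock before the EOI callback, which may
need to synchronise with another CPU, then retake it to finish the
update.

/* Build: gcc -pthread eoi_pattern.c -o eoi_pattern */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for hvm_dpci_eoi(): may need to synchronise with another
 * CPU/thread, so it must never be invoked with irq_lock held. */
static void dpci_eoi(int gsi)
{
    printf("EOI of gsi %d performed outside irq_lock\n", gsi);
}

/* Mirrors the shape of the patched vioapic_update_EOI(). */
static void vioapic_update_eoi(int gsi)
{
    pthread_mutex_lock(&irq_lock);
    /* ... clear remote_irr and update other state under the lock ... */
    pthread_mutex_unlock(&irq_lock);  /* drop the lock around the EOI */
    dpci_eoi(gsi);
    pthread_mutex_lock(&irq_lock);    /* retake it to finish the update */
    /* ... re-deliver any pending level-triggered interrupt ... */
    pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
    vioapic_update_eoi(9);
    return 0;
}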

Signed-off-by: Xiaohui Xin <xiaohui.xin@intel.com>
Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Tue Oct 30 10:39:52 2007 +0000 (2007-10-30)
parents 26fb702fd8cf
children 4b4c75cb6c0f
files xen/arch/x86/hvm/irq.c xen/arch/x86/hvm/vioapic.c xen/arch/x86/hvm/vmx/vtd/io.c xen/arch/x86/hvm/vpic.c xen/include/asm-x86/hvm/irq.h
line diff
--- a/xen/arch/x86/hvm/irq.c	Tue Oct 30 10:17:40 2007 +0000
+++ b/xen/arch/x86/hvm/irq.c	Tue Oct 30 10:39:52 2007 +0000
@@ -26,7 +26,7 @@
 #include <asm/hvm/domain.h>
 #include <asm/hvm/support.h>
 
-void __hvm_pci_intx_assert(
+static void __hvm_pci_intx_assert(
     struct domain *d, unsigned int device, unsigned int intx)
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
@@ -59,7 +59,7 @@ void hvm_pci_intx_assert(
     spin_unlock(&d->arch.hvm_domain.irq_lock);
 }
 
-void __hvm_pci_intx_deassert(
+static void __hvm_pci_intx_deassert(
     struct domain *d, unsigned int device, unsigned int intx)
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
--- a/xen/arch/x86/hvm/vioapic.c	Tue Oct 30 10:17:40 2007 +0000
+++ b/xen/arch/x86/hvm/vioapic.c	Tue Oct 30 10:39:52 2007 +0000
@@ -459,7 +459,11 @@ void vioapic_update_EOI(struct domain *d
     ent->fields.remote_irr = 0;
 
     if ( vtd_enabled )
+    {
+        spin_unlock(&d->arch.hvm_domain.irq_lock);
         hvm_dpci_eoi(current->domain, gsi, ent);
+        spin_lock(&d->arch.hvm_domain.irq_lock);
+    }
 
     if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
          !ent->fields.mask &&
--- a/xen/arch/x86/hvm/vmx/vtd/io.c	Tue Oct 30 10:17:40 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vtd/io.c	Tue Oct 30 10:39:52 2007 +0000
@@ -145,8 +145,6 @@ void hvm_dpci_eoi(struct domain *d, unsi
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
     uint32_t device, intx, machine_gsi;
 
-    ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));
-
     if ( !vtd_enabled || (hvm_irq_dpci == NULL) ||
          !hvm_irq_dpci->girq[guest_gsi].valid )
         return;
@@ -157,7 +155,7 @@ void hvm_dpci_eoi(struct domain *d, unsi
     intx = hvm_irq_dpci->girq[guest_gsi].intx;
     gdprintk(XENLOG_INFO, "hvm_dpci_eoi:: device %x intx %x\n",
              device, intx);
-    __hvm_pci_intx_deassert(d, device, intx);
+    hvm_pci_intx_deassert(d, device, intx);
     if ( (ent == NULL) || !ent->fields.mask )
         pirq_guest_eoi(d, machine_gsi);
 }
--- a/xen/arch/x86/hvm/vpic.c	Tue Oct 30 10:17:40 2007 +0000
+++ b/xen/arch/x86/hvm/vpic.c	Tue Oct 30 10:39:52 2007 +0000
@@ -249,13 +249,13 @@ static void vpic_ioport_write(
                 vpic->isr &= ~(1 << irq);
                 if ( cmd == 7 )
                     vpic->priority_add = (irq + 1) & 7;
-                if ( vtd_enabled )
-                {
-                    irq |= ((addr & 0xa0) == 0xa0) ? 8 : 0;
-                    hvm_dpci_eoi(current->domain,
-                                 hvm_isa_irq_to_gsi(irq), NULL);
-                }
-                break;
+                /* Release lock and EOI the physical interrupt (if any). */
+                vpic_update_int_output(vpic);
+                vpic_unlock(vpic);
+                hvm_dpci_eoi(current->domain,
+                             hvm_isa_irq_to_gsi((addr >> 7) ? (irq|8) : irq),
+                             NULL);
+                return; /* bail immediately */
             case 6: /* Set Priority                */
                 vpic->priority_add = (val + 1) & 7;
                 break;
--- a/xen/include/asm-x86/hvm/irq.h	Tue Oct 30 10:17:40 2007 +0000
+++ b/xen/include/asm-x86/hvm/irq.h	Tue Oct 30 10:39:52 2007 +0000
@@ -121,12 +121,8 @@ struct hvm_irq {
 #define hvm_isa_irq_to_gsi(isa_irq) ((isa_irq) ? : 2)
 
 /* Modify state of a PCI INTx wire. */
-void __hvm_pci_intx_assert(
-    struct domain *d, unsigned int device, unsigned int intx);
 void hvm_pci_intx_assert(
     struct domain *d, unsigned int device, unsigned int intx);
-void __hvm_pci_intx_deassert(
-    struct domain *d, unsigned int device, unsigned int intx);
 void hvm_pci_intx_deassert(
     struct domain *d, unsigned int device, unsigned int intx);
 