ia64/xen-unstable

changeset 18601:a11ad61bdb5b

Fix lock issue for hvm pass-through domain

This patch protects the hvm_irq_dpci structure with evtchn_lock, so
access to the domain's pirq_vector mapping is also protected.

Signed-off-by: Jiang, Yunhong <yunhong.jiang@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Oct 09 11:14:52 2008 +0100 (2008-10-09)
parents b8f329d2c074
children 0033c944318f
files xen/arch/x86/hvm/svm/intr.c xen/arch/x86/hvm/vmsi.c xen/arch/x86/hvm/vmx/intr.c xen/arch/x86/irq.c xen/drivers/passthrough/io.c xen/drivers/passthrough/pci.c xen/drivers/passthrough/vtd/x86/vtd.c xen/include/asm-x86/hvm/irq.h xen/include/xen/irq.h
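In outline, the patch replaces the per-structure dirq_lock (which covered only the pending counter) with the domain-wide evtchn_lock held across the whole dirq-dispatch sequence. A minimal before/after sketch of the pattern, condensed from the svm/vmx hunks below (illustrative only, not literal patch code):

    /* Before: only the pending counter was serialised; the flag test,
     * interrupt assertion and timer arming could race with bind/unbind. */
    spin_lock(&hvm_irq_dpci->dirq_lock);
    hvm_irq_dpci->mirq[irq].pending++;
    spin_unlock(&hvm_irq_dpci->dirq_lock);

    /* After: the domain's evtchn_lock covers the whole sequence. */
    spin_lock(&d->evtchn_lock);
    if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
        hvm_pci_msi_assert(d, irq);
    else
    {
        hvm_pci_intx_assert(d, device, intx);
        hvm_irq_dpci->mirq[irq].pending++;
        set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
                  NOW() + PT_IRQ_TIME_OUT);
    }
    spin_unlock(&d->evtchn_lock);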
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/intr.c	Thu Oct 09 11:08:13 2008 +0100
     1.2 +++ b/xen/arch/x86/hvm/svm/intr.c	Thu Oct 09 11:14:52 2008 +0100
     1.3 @@ -124,9 +124,11 @@ static void svm_dirq_assist(struct vcpu 
     1.4          if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
     1.5              continue;
     1.6  
     1.7 +        spin_lock(&d->evtchn_lock);
     1.8          if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
     1.9          {
    1.10              hvm_pci_msi_assert(d, irq);
    1.11 +            spin_unlock(&d->evtchn_lock);
    1.12              continue;
    1.13          }
    1.14  
    1.15 @@ -137,9 +139,7 @@ static void svm_dirq_assist(struct vcpu 
    1.16              device = digl->device;
    1.17              intx = digl->intx;
    1.18              hvm_pci_intx_assert(d, device, intx);
    1.19 -            spin_lock(&hvm_irq_dpci->dirq_lock);
    1.20              hvm_irq_dpci->mirq[irq].pending++;
    1.21 -            spin_unlock(&hvm_irq_dpci->dirq_lock);
    1.22          }
    1.23  
    1.24          /*
    1.25 @@ -151,6 +151,7 @@ static void svm_dirq_assist(struct vcpu 
    1.26           */
    1.27          set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
    1.28                    NOW() + PT_IRQ_TIME_OUT);
    1.29 +        spin_unlock(&d->evtchn_lock);
    1.30      }
    1.31  }
    1.32  
     2.1 --- a/xen/arch/x86/hvm/vmsi.c	Thu Oct 09 11:08:13 2008 +0100
     2.2 +++ b/xen/arch/x86/hvm/vmsi.c	Thu Oct 09 11:14:52 2008 +0100
     2.3 @@ -134,7 +134,7 @@ int vmsi_deliver(struct domain *d, int p
     2.4                  "vector=%x trig_mode=%x\n",
     2.5                  dest, dest_mode, delivery_mode, vector, trig_mode);
     2.6  
     2.7 -    if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI) )
     2.8 +    if ( !test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags) )
     2.9      {
    2.10          gdprintk(XENLOG_WARNING, "pirq %x not msi \n", pirq);
    2.11          return 0;
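The vmsi.c hunk is one piece of a flag-encoding change (see the irq.h hunk at the end): the old HVM_IRQ_DPCI_MSI was a bit *mask* tested with `&`, while the new _HVM_IRQ_DPCI_MSI is a bit *number* for the atomic bitops, and validity moves out of `flags` into the separate `mapping` bitmap. A sketch of the distinction, using the values from the irq.h hunk:

    /* Old: 0x2 is a mask, combined with |= and tested with &. */
    hvm_irq_dpci->mirq[pirq].flags |= HVM_IRQ_DPCI_MSI;           /* 0x2 */
    if ( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI ) { /* ... */ }

    /* New: 0x1 is a bit index, used with set_bit()/test_bit();
     * passing the old mask value here would test the wrong bit. */
    set_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags);
    if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags) ) { /* ... */ }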
     3.1 --- a/xen/arch/x86/hvm/vmx/intr.c	Thu Oct 09 11:08:13 2008 +0100
     3.2 +++ b/xen/arch/x86/hvm/vmx/intr.c	Thu Oct 09 11:14:52 2008 +0100
     3.3 @@ -127,11 +127,13 @@ static void vmx_dirq_assist(struct vcpu 
     3.4          if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
     3.5              continue;
     3.6  
     3.7 -		if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
     3.8 -		{
     3.9 -			hvm_pci_msi_assert(d, irq);
    3.10 -			continue;
    3.11 -		}
    3.12 +        spin_lock(&d->evtchn_lock);
    3.13 +        if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
    3.14 +        {
    3.15 +            hvm_pci_msi_assert(d, irq);
    3.16 +            spin_unlock(&d->evtchn_lock);
    3.17 +            continue;
    3.18 +        }
    3.19  
    3.20          stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
    3.21  
    3.22 @@ -140,9 +142,7 @@ static void vmx_dirq_assist(struct vcpu 
    3.23              device = digl->device;
    3.24              intx = digl->intx;
    3.25              hvm_pci_intx_assert(d, device, intx);
    3.26 -            spin_lock(&hvm_irq_dpci->dirq_lock);
    3.27              hvm_irq_dpci->mirq[irq].pending++;
    3.28 -            spin_unlock(&hvm_irq_dpci->dirq_lock);
    3.29          }
    3.30  
    3.31          /*
    3.32 @@ -154,6 +154,7 @@ static void vmx_dirq_assist(struct vcpu 
    3.33           */
    3.34          set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
    3.35                    NOW() + PT_IRQ_TIME_OUT);
    3.36 +        spin_unlock(&d->evtchn_lock);
    3.37      }
    3.38  }
    3.39  
     4.1 --- a/xen/arch/x86/irq.c	Thu Oct 09 11:08:13 2008 +0100
     4.2 +++ b/xen/arch/x86/irq.c	Thu Oct 09 11:14:52 2008 +0100
     4.3 @@ -285,7 +285,7 @@ static void __do_IRQ_guest(int vector)
     4.4   * The descriptor is returned locked. This function is safe against changes
     4.5   * to the per-domain irq-to-vector mapping.
     4.6   */
     4.7 -static irq_desc_t *domain_spin_lock_irq_desc(
     4.8 +irq_desc_t *domain_spin_lock_irq_desc(
     4.9      struct domain *d, int irq, unsigned long *pflags)
    4.10  {
    4.11      unsigned int vector;
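Dropping `static` here exports domain_spin_lock_irq_desc() for callers outside irq.c; its contract, as exercised by the hvm_dpci_msi_eoi() hunk further down, is roughly this (condensed sketch):

    /* Returns the irq_desc for the domain's irq, locked and safe against
     * pirq-to-vector remapping, or NULL on failure; with pflags == NULL
     * the caller drops desc->lock itself via spin_unlock_irq(). */
    irq_desc_t *desc = domain_spin_lock_irq_desc(d, pirq, NULL);
    if ( desc != NULL )
    {
        desc->status &= ~IRQ_INPROGRESS;
        spin_unlock_irq(&desc->lock);
    }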
     5.1 --- a/xen/drivers/passthrough/io.c	Thu Oct 09 11:08:13 2008 +0100
     5.2 +++ b/xen/drivers/passthrough/io.c	Thu Oct 09 11:14:52 2008 +0100
     5.3 @@ -26,10 +26,14 @@ static void pt_irq_time_out(void *data)
     5.4      struct hvm_mirq_dpci_mapping *irq_map = data;
     5.5      unsigned int guest_gsi, machine_gsi = 0;
     5.6      int vector;
     5.7 -    struct hvm_irq_dpci *dpci = domain_get_irq_dpci(irq_map->dom);
     5.8 +    struct hvm_irq_dpci *dpci = NULL;
     5.9      struct dev_intx_gsi_link *digl;
    5.10      uint32_t device, intx;
    5.11  
    5.12 +    spin_lock(&irq_map->dom->evtchn_lock);
    5.13 +
    5.14 +    dpci = domain_get_irq_dpci(irq_map->dom);
    5.15 +    ASSERT(dpci);
    5.16      list_for_each_entry ( digl, &irq_map->digl_list, list )
    5.17      {
    5.18          guest_gsi = digl->gsi;
    5.19 @@ -41,55 +45,65 @@ static void pt_irq_time_out(void *data)
    5.20  
    5.21      clear_bit(machine_gsi, dpci->dirq_mask);
    5.22      vector = domain_irq_to_vector(irq_map->dom, machine_gsi);
    5.23 -    stop_timer(&dpci->hvm_timer[vector]);
    5.24 -    spin_lock(&dpci->dirq_lock);
    5.25      dpci->mirq[machine_gsi].pending = 0;
    5.26 -    spin_unlock(&dpci->dirq_lock);
    5.27 +    spin_unlock(&irq_map->dom->evtchn_lock);
    5.28      pirq_guest_eoi(irq_map->dom, machine_gsi);
    5.29  }
    5.30  
    5.31  int pt_irq_create_bind_vtd(
    5.32      struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
    5.33  {
    5.34 -    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
    5.35 +    struct hvm_irq_dpci *hvm_irq_dpci = NULL;
    5.36      uint32_t machine_gsi, guest_gsi;
    5.37      uint32_t device, intx, link;
    5.38      struct dev_intx_gsi_link *digl;
    5.39 +    int pirq = pt_irq_bind->machine_irq;
    5.40  
    5.41 +    if ( pirq < 0 || pirq >= NR_PIRQS )
    5.42 +        return -EINVAL;
    5.43 +
    5.44 +    spin_lock(&d->evtchn_lock);
    5.45 +
    5.46 +    hvm_irq_dpci = domain_get_irq_dpci(d);
    5.47      if ( hvm_irq_dpci == NULL )
    5.48      {
    5.49          hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
    5.50          if ( hvm_irq_dpci == NULL )
    5.51 +        {
    5.52 +            spin_unlock(&d->evtchn_lock);
    5.53              return -ENOMEM;
    5.54 -
    5.55 +        }
    5.56          memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
    5.57 -        spin_lock_init(&hvm_irq_dpci->dirq_lock);
    5.58          for ( int i = 0; i < NR_IRQS; i++ )
    5.59              INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
    5.60 +    }
    5.61  
    5.62 -        if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
    5.63 -            xfree(hvm_irq_dpci);
    5.64 +    if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
    5.65 +    {
    5.66 +        xfree(hvm_irq_dpci);
    5.67 +        spin_unlock(&d->evtchn_lock);
    5.68 +        return -EINVAL;
    5.69      }
    5.70  
    5.71      if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
    5.72      {
    5.73 -        int pirq = pt_irq_bind->machine_irq;
    5.74  
    5.75 -        if ( pirq < 0 || pirq >= NR_IRQS )
    5.76 -            return -EINVAL;
    5.77 -
    5.78 -        if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_VALID ) )
    5.79 +        if ( !test_and_set_bit(pirq, hvm_irq_dpci->mapping))
    5.80          {
    5.81 -            hvm_irq_dpci->mirq[pirq].flags |= HVM_IRQ_DPCI_VALID |
    5.82 -                                              HVM_IRQ_DPCI_MSI ;
    5.83 +            set_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags);
    5.84 +            hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
    5.85 +            hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
    5.86 +            hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
    5.87 +            /* bind after hvm_irq_dpci is setup to avoid race with irq handler*/
    5.88              pirq_guest_bind(d->vcpu[0], pirq, 0);
    5.89          }
    5.90 +        else if (hvm_irq_dpci->mirq[pirq].gmsi.gvec != pt_irq_bind->u.msi.gvec
    5.91 +                ||hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] != pirq)
    5.92  
    5.93 -        hvm_irq_dpci->mirq[pirq].flags |= HVM_IRQ_DPCI_VALID |HVM_IRQ_DPCI_MSI ;
    5.94 -        hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
    5.95 -        hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
    5.96 -        hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
    5.97 -
    5.98 +        {
    5.99 +            spin_unlock(&d->evtchn_lock);
   5.100 +            return -EBUSY;
   5.101 +        }
   5.102      }
   5.103      else
   5.104      {
   5.105 @@ -102,7 +116,10 @@ int pt_irq_create_bind_vtd(
   5.106  
   5.107          digl = xmalloc(struct dev_intx_gsi_link);
   5.108          if ( !digl )
   5.109 +        {
   5.110 +            spin_unlock(&d->evtchn_lock);
   5.111              return -ENOMEM;
   5.112 +        }
   5.113  
   5.114          digl->device = device;
   5.115          digl->intx = intx;
   5.116 @@ -117,11 +134,11 @@ int pt_irq_create_bind_vtd(
   5.117          hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
   5.118  
   5.119          /* Bind the same mirq once in the same domain */
   5.120 -        if ( !(hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
   5.121 +        if ( !test_and_set_bit(machine_gsi, hvm_irq_dpci->mapping))
   5.122          {
   5.123 -            hvm_irq_dpci->mirq[machine_gsi].flags |= HVM_IRQ_DPCI_VALID;
   5.124              hvm_irq_dpci->mirq[machine_gsi].dom = d;
   5.125  
   5.126 +            /* Init timer before binding */
   5.127              init_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)],
   5.128                         pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
   5.129              /* Deal with gsi for legacy devices */
   5.130 @@ -132,37 +149,45 @@ int pt_irq_create_bind_vtd(
   5.131                   "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
   5.132                   machine_gsi, device, intx);
   5.133      }
   5.134 +    spin_unlock(&d->evtchn_lock);
   5.135      return 0;
   5.136  }
   5.137  
   5.138  int pt_irq_destroy_bind_vtd(
   5.139      struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
   5.140  {
   5.141 -    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
   5.142 +    struct hvm_irq_dpci *hvm_irq_dpci = NULL;
   5.143      uint32_t machine_gsi, guest_gsi;
   5.144      uint32_t device, intx, link;
   5.145      struct list_head *digl_list, *tmp;
   5.146      struct dev_intx_gsi_link *digl;
   5.147  
   5.148 -    if ( hvm_irq_dpci == NULL )
   5.149 -        return 0;
   5.150 -
   5.151      machine_gsi = pt_irq_bind->machine_irq;
   5.152      device = pt_irq_bind->u.pci.device;
   5.153      intx = pt_irq_bind->u.pci.intx;
   5.154      guest_gsi = hvm_pci_intx_gsi(device, intx);
   5.155      link = hvm_pci_intx_link(device, intx);
   5.156 -    hvm_irq_dpci->link_cnt[link]--;
   5.157  
   5.158      gdprintk(XENLOG_INFO,
   5.159               "pt_irq_destroy_bind_vtd: machine_gsi=%d "
   5.160               "guest_gsi=%d, device=%d, intx=%d.\n",
   5.161               machine_gsi, guest_gsi, device, intx);
   5.162 +    spin_lock(&d->evtchn_lock);
   5.163 +
   5.164 +    hvm_irq_dpci = domain_get_irq_dpci(d);
   5.165 +
   5.166 +    if ( hvm_irq_dpci == NULL )
   5.167 +    {
   5.168 +        spin_unlock(&d->evtchn_lock);
   5.169 +        return -EINVAL;
   5.170 +    }
   5.171 +
   5.172 +    hvm_irq_dpci->link_cnt[link]--;
   5.173      memset(&hvm_irq_dpci->girq[guest_gsi], 0,
   5.174             sizeof(struct hvm_girq_dpci_mapping));
   5.175  
   5.176      /* clear the mirq info */
   5.177 -    if ( (hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
   5.178 +    if ( test_bit(machine_gsi, hvm_irq_dpci->mapping))
   5.179      {
   5.180          list_for_each_safe ( digl_list, tmp,
   5.181                  &hvm_irq_dpci->mirq[machine_gsi].digl_list )
   5.182 @@ -185,9 +210,10 @@ int pt_irq_destroy_bind_vtd(
   5.183              kill_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)]);
   5.184              hvm_irq_dpci->mirq[machine_gsi].dom   = NULL;
   5.185              hvm_irq_dpci->mirq[machine_gsi].flags = 0;
   5.186 +            clear_bit(machine_gsi, hvm_irq_dpci->mapping);
   5.187          }
   5.188      }
   5.189 -
   5.190 +    spin_unlock(&d->evtchn_lock);
   5.191      gdprintk(XENLOG_INFO,
   5.192               "XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
   5.193               machine_gsi, device, intx);
   5.194 @@ -199,8 +225,9 @@ int hvm_do_IRQ_dpci(struct domain *d, un
   5.195  {
   5.196      struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
   5.197  
   5.198 +    ASSERT(spin_is_locked(&irq_desc[domain_irq_to_vector(d, mirq)].lock));
   5.199      if ( !iommu_enabled || (d == dom0) || !dpci ||
   5.200 -         !dpci->mirq[mirq].flags & HVM_IRQ_DPCI_VALID )
   5.201 +         !test_bit(mirq, dpci->mapping))
   5.202          return 0;
   5.203  
   5.204      /*
   5.205 @@ -218,44 +245,46 @@ int hvm_do_IRQ_dpci(struct domain *d, un
   5.206      return 1;
   5.207  }
   5.208  
   5.209 -
   5.210  void hvm_dpci_msi_eoi(struct domain *d, int vector)
   5.211  {
   5.212      struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
   5.213 +    irq_desc_t *desc;
   5.214      int pirq;
   5.215 -    unsigned long flags;
   5.216 -    irq_desc_t *desc;
   5.217  
   5.218      if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
   5.219         return;
   5.220  
   5.221 +    spin_lock(&d->evtchn_lock);
   5.222      pirq = hvm_irq_dpci->msi_gvec_pirq[vector];
   5.223  
   5.224      if ( ( pirq >= 0 ) && (pirq < NR_PIRQS) &&
   5.225 -         (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_VALID) &&
   5.226 -         (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI) )
   5.227 -    {
   5.228 -        int vec;
   5.229 -        vec = domain_irq_to_vector(d, pirq);
   5.230 -        desc = &irq_desc[vec];
   5.231 +          test_bit(pirq, hvm_irq_dpci->mapping) &&
   5.232 +         (test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags)))
   5.233 +     {
   5.234 +         BUG_ON(!local_irq_is_enabled());
   5.235 +         desc = domain_spin_lock_irq_desc(d, pirq, NULL);
   5.236 +         if (!desc)
   5.237 +         {
   5.238 +            spin_unlock(&d->evtchn_lock);
   5.239 +            return;
   5.240 +         }
   5.241  
   5.242 -        spin_lock_irqsave(&desc->lock, flags);
   5.243 -        desc->status &= ~IRQ_INPROGRESS;
   5.244 -        spin_unlock_irqrestore(&desc->lock, flags);
   5.245 +         desc->status &= ~IRQ_INPROGRESS;
   5.246 +         spin_unlock_irq(&desc->lock);
   5.247  
   5.248 -        pirq_guest_eoi(d, pirq);
   5.249 -    }
   5.250 +         pirq_guest_eoi(d, pirq);
   5.251 +     }
   5.252 +
   5.253 +    spin_unlock(&d->evtchn_lock);
   5.254  }
   5.255  
   5.256  void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
   5.257                    union vioapic_redir_entry *ent)
   5.258  {
   5.259 -    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
   5.260 +    struct hvm_irq_dpci *hvm_irq_dpci = NULL;
   5.261      uint32_t device, intx, machine_gsi;
   5.262  
   5.263 -    if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
   5.264 -         (guest_gsi >= NR_ISAIRQS &&
   5.265 -          !hvm_irq_dpci->girq[guest_gsi].valid) )
   5.266 +    if ( !iommu_enabled)
   5.267          return;
   5.268  
   5.269      if ( guest_gsi < NR_ISAIRQS )
   5.270 @@ -264,23 +293,34 @@ void hvm_dpci_eoi(struct domain *d, unsi
   5.271          return;
   5.272      }
   5.273  
   5.274 -    machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
   5.275 +    spin_lock(&d->evtchn_lock);
   5.276 +    hvm_irq_dpci = domain_get_irq_dpci(d);
   5.277 +
   5.278 +    if((hvm_irq_dpci == NULL) ||
   5.279 +         (guest_gsi >= NR_ISAIRQS &&
   5.280 +          !hvm_irq_dpci->girq[guest_gsi].valid) )
   5.281 +    {
   5.282 +        spin_unlock(&d->evtchn_lock);
   5.283 +        return;
   5.284 +    }
   5.285 +
   5.286      device = hvm_irq_dpci->girq[guest_gsi].device;
   5.287      intx = hvm_irq_dpci->girq[guest_gsi].intx;
   5.288      hvm_pci_intx_deassert(d, device, intx);
   5.289  
   5.290 -    spin_lock(&hvm_irq_dpci->dirq_lock);
   5.291 +    machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
   5.292      if ( --hvm_irq_dpci->mirq[machine_gsi].pending == 0 )
   5.293      {
   5.294 -        spin_unlock(&hvm_irq_dpci->dirq_lock);
   5.295 -
   5.296          if ( (ent == NULL) || !ent->fields.mask )
   5.297          {
   5.298 +            /*
   5.299 +             * No need to get vector lock for timer
   5.300 +             * since interrupt is still not EOIed
   5.301 +             */
   5.302              stop_timer(&hvm_irq_dpci->hvm_timer[
   5.303                  domain_irq_to_vector(d, machine_gsi)]);
   5.304              pirq_guest_eoi(d, machine_gsi);
   5.305          }
   5.306      }
   5.307 -    else
   5.308 -        spin_unlock(&hvm_irq_dpci->dirq_lock);
   5.309 +    spin_unlock(&d->evtchn_lock);
   5.310  }
     6.1 --- a/xen/drivers/passthrough/pci.c	Thu Oct 09 11:08:13 2008 +0100
     6.2 +++ b/xen/drivers/passthrough/pci.c	Thu Oct 09 11:14:52 2008 +0100
     6.3 @@ -154,7 +154,7 @@ int pci_remove_device(u8 bus, u8 devfn)
     6.4  
     6.5  static void pci_clean_dpci_irqs(struct domain *d)
     6.6  {
     6.7 -    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
     6.8 +    struct hvm_irq_dpci *hvm_irq_dpci = NULL;
     6.9      uint32_t i;
    6.10      struct list_head *digl_list, *tmp;
    6.11      struct dev_intx_gsi_link *digl;
    6.12 @@ -165,13 +165,14 @@ static void pci_clean_dpci_irqs(struct d
    6.13      if ( !is_hvm_domain(d) && !need_iommu(d) )
    6.14          return;
    6.15  
    6.16 +    spin_lock(&d->evtchn_lock);
    6.17 +    hvm_irq_dpci = domain_get_irq_dpci(d);
    6.18      if ( hvm_irq_dpci != NULL )
    6.19      {
    6.20 -        for ( i = 0; i < NR_IRQS; i++ )
    6.21 +        for ( i = find_first_bit(hvm_irq_dpci->mapping, NR_PIRQS);
    6.22 +              i < NR_PIRQS;
    6.23 +              i = find_next_bit(hvm_irq_dpci->mapping, NR_PIRQS, i + 1) )
    6.24          {
    6.25 -            if ( !(hvm_irq_dpci->mirq[i].flags & HVM_IRQ_DPCI_VALID) )
    6.26 -                continue;
    6.27 -
    6.28              pirq_guest_unbind(d, i);
    6.29              kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
    6.30  
    6.31 @@ -188,6 +189,7 @@ static void pci_clean_dpci_irqs(struct d
    6.32          d->arch.hvm_domain.irq.dpci = NULL;
    6.33          xfree(hvm_irq_dpci);
    6.34      }
    6.35 +    spin_unlock(&d->evtchn_lock);
    6.36  }
    6.37  
    6.38  void pci_release_devices(struct domain *d)
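Scanning with find_first_bit()/find_next_bit() visits only pirqs whose "bound" bit is set, rather than walking all NR_IRQS entries and testing a validity flag on each. The loop shape used here and in the vtd.c hunk below (sketch):

    unsigned int i;
    for ( i = find_first_bit(hvm_irq_dpci->mapping, NR_PIRQS);
          i < NR_PIRQS;
          i = find_next_bit(hvm_irq_dpci->mapping, NR_PIRQS, i + 1) )
    {
        /* i is a bound machine pirq; tear it down. */
        pirq_guest_unbind(d, i);
    }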
     7.1 --- a/xen/drivers/passthrough/vtd/x86/vtd.c	Thu Oct 09 11:08:13 2008 +0100
     7.2 +++ b/xen/drivers/passthrough/vtd/x86/vtd.c	Thu Oct 09 11:14:52 2008 +0100
     7.3 @@ -85,37 +85,41 @@ int domain_set_irq_dpci(struct domain *d
     7.4  void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
     7.5  {
     7.6      struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
     7.7 -    struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
     7.8 +    struct hvm_irq_dpci *dpci = NULL;
     7.9      struct dev_intx_gsi_link *digl, *tmp;
    7.10      int i;
    7.11  
    7.12      ASSERT(isairq < NR_ISAIRQS);
    7.13 -    if ( !vtd_enabled || !dpci ||
    7.14 -         !test_bit(isairq, dpci->isairq_map) )
    7.15 +    if ( !vtd_enabled)
    7.16          return;
    7.17  
    7.18 -    /* Multiple mirq may be mapped to one isa irq */
    7.19 -    for ( i = 0; i < NR_IRQS; i++ )
    7.20 +    spin_lock(&d->evtchn_lock);
    7.21 +
    7.22 +    dpci = domain_get_irq_dpci(d);
    7.23 +
    7.24 +    if ( !dpci || !test_bit(isairq, dpci->isairq_map) )
    7.25      {
    7.26 -        if ( !dpci->mirq[i].flags & HVM_IRQ_DPCI_VALID )
    7.27 -            continue;
    7.28 -
    7.29 +        spin_unlock(&d->evtchn_lock);
    7.30 +        return;
    7.31 +    }
    7.32 +    /* Multiple mirq may be mapped to one isa irq */
    7.33 +    for ( i = find_first_bit(dpci->mapping, NR_PIRQS);
    7.34 +          i < NR_PIRQS;
    7.35 +          i = find_next_bit(dpci->mapping, NR_PIRQS, i + 1) )
    7.36 +    {
    7.37          list_for_each_entry_safe ( digl, tmp,
    7.38              &dpci->mirq[i].digl_list, list )
    7.39          {
    7.40              if ( hvm_irq->pci_link.route[digl->link] == isairq )
    7.41              {
    7.42                  hvm_pci_intx_deassert(d, digl->device, digl->intx);
    7.43 -                spin_lock(&dpci->dirq_lock);
    7.44                  if ( --dpci->mirq[i].pending == 0 )
    7.45                  {
    7.46 -                    spin_unlock(&dpci->dirq_lock);
    7.47                      stop_timer(&dpci->hvm_timer[domain_irq_to_vector(d, i)]);
    7.48                      pirq_guest_eoi(d, i);
    7.49                  }
    7.50 -                else
    7.51 -                    spin_unlock(&dpci->dirq_lock);
    7.52              }
    7.53          }
    7.54      }
    7.55 +    spin_unlock(&d->evtchn_lock);
    7.56  }
     8.1 --- a/xen/include/asm-x86/hvm/irq.h	Thu Oct 09 11:08:13 2008 +0100
     8.2 +++ b/xen/include/asm-x86/hvm/irq.h	Thu Oct 09 11:14:52 2008 +0100
     8.3 @@ -25,6 +25,7 @@
     8.4  #include <xen/types.h>
     8.5  #include <xen/spinlock.h>
     8.6  #include <asm/irq.h>
     8.7 +#include <asm/pirq.h>
     8.8  #include <asm/hvm/hvm.h>
     8.9  #include <asm/hvm/vpic.h>
    8.10  #include <asm/hvm/vioapic.h>
    8.11 @@ -38,8 +39,6 @@ struct dev_intx_gsi_link {
    8.12      uint8_t link;
    8.13  };
    8.14  
    8.15 -#define HVM_IRQ_DPCI_VALID 0x1
    8.16 -#define HVM_IRQ_DPCI_MSI   0x2
    8.17  #define _HVM_IRQ_DPCI_MSI  0x1
    8.18  
    8.19  struct hvm_gmsi_info {
    8.20 @@ -64,9 +63,10 @@ struct hvm_girq_dpci_mapping {
    8.21  
    8.22  #define NR_ISAIRQS  16
    8.23  #define NR_LINK     4
    8.24 +/* Protected by domain's evtchn_lock */
    8.25  struct hvm_irq_dpci {
    8.26 -    spinlock_t dirq_lock;
    8.27      /* Machine IRQ to guest device/intx mapping. */
    8.28 +    DECLARE_BITMAP(mapping, NR_PIRQS);
    8.29      struct hvm_mirq_dpci_mapping mirq[NR_IRQS];
    8.30      /* Guest IRQ to guest device/intx mapping. */
    8.31      struct hvm_girq_dpci_mapping girq[NR_IRQS];
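For reference, DECLARE_BITMAP(mapping, NR_PIRQS) declares an array of unsigned longs large enough to hold one bit per machine pirq; this is the storage that the test_bit()/set_bit()/find_*_bit() calls throughout the patch operate on. Approximately (per Xen's bitmap headers):

    /* Rough expansion of the declaration above: */
    unsigned long mapping[BITS_TO_LONGS(NR_PIRQS)];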
     9.1 --- a/xen/include/xen/irq.h	Thu Oct 09 11:08:13 2008 +0100
     9.2 +++ b/xen/include/xen/irq.h	Thu Oct 09 11:14:52 2008 +0100
     9.3 @@ -78,6 +78,8 @@ extern int pirq_guest_eoi(struct domain 
     9.4  extern int pirq_guest_unmask(struct domain *d);
     9.5  extern int pirq_guest_bind(struct vcpu *v, int irq, int will_share);
     9.6  extern void pirq_guest_unbind(struct domain *d, int irq);
     9.7 +extern irq_desc_t *domain_spin_lock_irq_desc(
     9.8 +    struct domain *d, int irq, unsigned long *pflags);
     9.9  
    9.10  static inline void set_native_irq_info(int irq, cpumask_t mask)
    9.11  {