ia64/xen-unstable

changeset 19311:e6b7b747d122

passthrough: fix some spinlock issues in vmsi

Apart from efficiency, the pressing issue is an assertion failure, which this patch fixes as follows (the resulting locking pattern is sketched after this list):

- acquire pcidevs_lock before calling pt_irq_xxx_bind_vtd
- allocate msixtbl_entry beforehand
- check return value from domain_spin_lock_irq_desc()
- typo: spin_unlock(&irq_desc->lock) -> spin_unlock_irq(&irq_desc->lock)
- acquire msixtbl_list_lock with irq_disabled
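
For reference, the allocate-before-locking pattern that the vmsi.c hunks below give msixtbl_pt_register() is sketched here in condensed form (not a literal extract; identifiers are the patch's own, unrelated paths trimmed):

    /*
     * xmalloc() may itself take an allocator lock that check_lock()
     * expects to see with interrupts enabled, so allocate before
     * taking the IRQ-disabling descriptor lock; the descriptor
     * lookup can fail and must be checked.
     */
    int msixtbl_pt_register(struct domain *d, int pirq, uint64_t gtable)
    {
        struct msixtbl_entry *new_entry;
        irq_desc_t *irq_desc;
        int r = -EINVAL;

        new_entry = xmalloc(struct msixtbl_entry); /* IRQs still enabled */
        if ( !new_entry )
            return -ENOMEM;

        irq_desc = domain_spin_lock_irq_desc(d, pirq, NULL);
        if ( !irq_desc )                   /* lookup failed: clean up */
        {
            xfree(new_entry);
            return r;
        }

        /* ... publish new_entry (setting it to NULL) or leave it ... */

        spin_unlock_irq(&irq_desc->lock);
        xfree(new_entry);         /* no-op if the entry was published */
        return r;
    }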

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Mar 11 10:09:21 2009 +0000 (2009-03-11)
parents 07042b677ba4
children 2cd96ef83996
files xen/arch/x86/domctl.c xen/arch/x86/hvm/vmsi.c
--- a/xen/arch/x86/domctl.c	Wed Mar 11 10:08:31 2009 +0000
+++ b/xen/arch/x86/domctl.c	Wed Mar 11 10:09:21 2009 +0000
@@ -764,7 +764,11 @@ long arch_do_domctl(
 
         ret = -ESRCH;
         if ( iommu_enabled )
+        {
+            spin_lock(&pcidevs_lock);
             ret = pt_irq_create_bind_vtd(d, bind);
+            spin_unlock(&pcidevs_lock);
+        }
         if ( ret < 0 )
             gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
 
@@ -783,7 +787,11 @@ long arch_do_domctl(
             break;
         bind = &(domctl->u.bind_pt_irq);
         if ( iommu_enabled )
+        {
+            spin_lock(&pcidevs_lock);
             ret = pt_irq_destroy_bind_vtd(d, bind);
+            spin_unlock(&pcidevs_lock);
+        }
         if ( ret < 0 )
             gdprintk(XENLOG_ERR, "pt_irq_destroy_bind failed!\n");
         rcu_unlock_domain(d);
--- a/xen/arch/x86/hvm/vmsi.c	Wed Mar 11 10:08:31 2009 +0000
+++ b/xen/arch/x86/hvm/vmsi.c	Wed Mar 11 10:09:21 2009 +0000
@@ -336,17 +336,13 @@ struct hvm_mmio_handler msixtbl_mmio_han
     .write_handler = msixtbl_write
 };
 
-static struct msixtbl_entry *add_msixtbl_entry(struct domain *d,
-                                               struct pci_dev *pdev,
-                                               uint64_t gtable)
+static void add_msixtbl_entry(struct domain *d,
+                              struct pci_dev *pdev,
+                              uint64_t gtable,
+                              struct msixtbl_entry *entry)
 {
-    struct msixtbl_entry *entry;
     u32 len;
 
-    entry = xmalloc(struct msixtbl_entry);
-    if ( !entry )
-        return NULL;
-
     memset(entry, 0, sizeof(struct msixtbl_entry));
 
     INIT_LIST_HEAD(&entry->list);
@@ -359,8 +355,6 @@ static struct msixtbl_entry *add_msixtbl
     entry->gtable = (unsigned long) gtable;
 
     list_add_rcu(&entry->list, &d->arch.hvm_domain.msixtbl_list);
-
-    return entry;
 }
 
 static void free_msixtbl_entry(struct rcu_head *rcu)
@@ -383,12 +377,25 @@ int msixtbl_pt_register(struct domain *d
     irq_desc_t *irq_desc;
     struct msi_desc *msi_desc;
     struct pci_dev *pdev;
-    struct msixtbl_entry *entry;
+    struct msixtbl_entry *entry, *new_entry;
     int r = -EINVAL;
 
     ASSERT(spin_is_locked(&pcidevs_lock));
 
+    /*
+     * xmalloc() with irq_disabled causes the failure of check_lock()
+     * for xenpool->lock. So we allocate an entry beforehand.
+     */
+    new_entry = xmalloc(struct msixtbl_entry);
+    if ( !new_entry )
+        return -ENOMEM;
+
     irq_desc = domain_spin_lock_irq_desc(d, pirq, NULL);
+    if ( !irq_desc )
+    {
+        xfree(new_entry);
+        return r;
+    }
 
     if ( irq_desc->handler != &pci_msi_type )
         goto out;
@@ -405,12 +412,9 @@ int msixtbl_pt_register(struct domain *d
         if ( pdev == entry->pdev )
             goto found;
 
-    entry = add_msixtbl_entry(d, pdev, gtable);
-    if ( !entry )
-    {
-        spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
-        goto out;
-    }
+    entry = new_entry;
+    new_entry = NULL;
+    add_msixtbl_entry(d, pdev, gtable, entry);
 
 found:
     atomic_inc(&entry->refcnt);
@@ -419,8 +423,8 @@ found:
 
 out:
     spin_unlock_irq(&irq_desc->lock);
+    xfree(new_entry);
     return r;
-
 }
 
 void msixtbl_pt_unregister(struct domain *d, int pirq)
@@ -433,6 +437,8 @@ void msixtbl_pt_unregister(struct domain
     ASSERT(spin_is_locked(&pcidevs_lock));
 
     irq_desc = domain_spin_lock_irq_desc(d, pirq, NULL);
+    if ( !irq_desc )
+        return;
 
     if ( irq_desc->handler != &pci_msi_type )
         goto out;
@@ -453,7 +459,7 @@ void msixtbl_pt_unregister(struct domain
 
 
 out:
-    spin_unlock(&irq_desc->lock);
+    spin_unlock_irq(&irq_desc->lock);
     return;
 
 found:
@@ -461,13 +467,16 @@ found:
         del_msixtbl_entry(entry);
 
     spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
-    spin_unlock(&irq_desc->lock);
+    spin_unlock_irq(&irq_desc->lock);
 }
 
 void msixtbl_pt_cleanup(struct domain *d, int pirq)
 {
     struct msixtbl_entry *entry, *temp;
+    unsigned long flags;
 
+    /* msixtbl_list_lock must be acquired with irq_disabled for check_lock() */
+    local_irq_save(flags);
     spin_lock(&d->arch.hvm_domain.msixtbl_list_lock);
 
     list_for_each_entry_safe( entry, temp,
@@ -475,4 +484,5 @@ void msixtbl_pt_cleanup(struct domain *d
         del_msixtbl_entry(entry);
 
     spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
+    local_irq_restore(flags);
 }
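
A note on the final hunk: the patch's comment points at Xen's check_lock(), which (in debug builds) asserts that a given lock is always acquired with the same interrupt state. Since msixtbl_list_lock is otherwise taken under the IRQ-disabling irq_desc->lock, msixtbl_pt_cleanup() must disable interrupts as well. A minimal sketch of the acquisition pattern, using the patch's identifiers:

    unsigned long flags;

    local_irq_save(flags);    /* disable IRQs, remembering prior state */
    spin_lock(&d->arch.hvm_domain.msixtbl_list_lock);

    /* ... walk and prune d->arch.hvm_domain.msixtbl_list ... */

    spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
    local_irq_restore(flags); /* restore the saved interrupt state */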