ia64/xen-unstable

changeset 19234:4771bceb1889

AMD IOMMU: clean up spinlock usage to satisfy check_lock().
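
check_lock() is Xen's debug-build lock-discipline assertion (in
xen/common/spinlock.c): each time a spinlock is acquired it notes whether
interrupts were disabled at that point, and BUG()s if the same lock is
later acquired in the opposite interrupt state. Mixing the two is a real
deadlock risk: an IRQ handler can spin forever on a lock whose plain
(IRQs-on) holder it has just interrupted. A minimal sketch of the idea,
simplified and with illustrative field names rather than the exact source:

    static void check_lock(struct lock_debug *debug)
    {
        /* Is this acquisition happening with interrupts disabled? */
        int irq_safe = !local_irq_is_enabled();

        /* The first acquisition records the state (-1 == "not yet
         * seen"); every later acquisition must match it. */
        if ( unlikely(debug->irq_safe != irq_safe) )
        {
            int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
            BUG_ON(seen == !irq_safe);
        }
    }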

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Feb 20 11:05:17 2009 +0000 (2009-02-20)
parents 84af3ded5b02
children 596f21d901f3
files xen/drivers/passthrough/amd/iommu_init.c xen/drivers/passthrough/amd/iommu_intr.c xen/drivers/passthrough/amd/iommu_map.c xen/drivers/passthrough/amd/pci_amd_iommu.c
line diff
--- a/xen/drivers/passthrough/amd/iommu_init.c	Thu Feb 19 11:07:33 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_init.c	Fri Feb 20 11:05:17 2009 +0000
@@ -37,9 +37,6 @@ struct ivrs_mappings *ivrs_mappings;
 struct list_head amd_iommu_head;
 struct table_struct device_table;
 
-extern void *int_remap_table;
-extern spinlock_t int_remap_table_lock;
-
 static int __init map_iommu_mmio_region(struct amd_iommu *iommu)
 {
     unsigned long mfn;

--- a/xen/drivers/passthrough/amd/iommu_intr.c	Thu Feb 19 11:07:33 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_intr.c	Fri Feb 20 11:05:17 2009 +0000
@@ -23,7 +23,7 @@
 #include <asm/hvm/svm/amd-iommu-proto.h>
 
 #define INTREMAP_TABLE_ORDER    1
-DEFINE_SPINLOCK(int_remap_table_lock);
+static DEFINE_SPINLOCK(int_remap_table_lock);
 void *int_remap_table = NULL;
 
 static u8 *get_intremap_entry(u8 vector, u8 dm)
@@ -110,21 +110,13 @@ static void update_intremap_entry_from_i
 
 int __init amd_iommu_setup_intremap_table(void)
 {
-    unsigned long flags;
-
-    spin_lock_irqsave(&int_remap_table_lock, flags);
-
     if ( int_remap_table == NULL )
     {
         int_remap_table = __alloc_amd_iommu_tables(INTREMAP_TABLE_ORDER);
         if ( int_remap_table == NULL )
-        {
-            spin_unlock_irqrestore(&int_remap_table_lock, flags);
             return -ENOMEM;
-        }
         memset(int_remap_table, 0, PAGE_SIZE * (1UL << INTREMAP_TABLE_ORDER));
     }
-    spin_unlock_irqrestore(&int_remap_table_lock, flags);
 
     return 0;
 }
@@ -210,15 +202,11 @@ void amd_iommu_msi_msg_update_ire(
 
 int __init deallocate_intremap_table(void)
 {
-    unsigned long flags;
-
-    spin_lock_irqsave(&int_remap_table_lock, flags);
     if ( int_remap_table )
     {
         __free_amd_iommu_tables(int_remap_table, INTREMAP_TABLE_ORDER);
         int_remap_table = NULL;
     }
-    spin_unlock_irqrestore(&int_remap_table_lock, flags);
 
     return 0;
 }
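
The two functions above are __init: they run once, single-threaded on the
boot CPU, before secondary CPUs are started or devices can raise
interrupts, so their locking is removed outright rather than converted.
With the extern declarations gone from iommu_init.c, int_remap_table_lock
can also become static; its remaining users are the interrupt-remapping
update paths in this file. For reference, a purely illustrative example
(made-up lock "lk", not from this patch) of the mixed usage check_lock()
exists to catch:

    spin_lock_irqsave(&lk, flags);    /* first seen with IRQs off */
    spin_unlock_irqrestore(&lk, flags);

    spin_lock(&lk);                   /* later taken with IRQs on:
                                         check_lock() BUG()s here in
                                         debug builds */
    spin_unlock(&lk);
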
--- a/xen/drivers/passthrough/amd/iommu_map.c	Thu Feb 19 11:07:33 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_map.c	Fri Feb 20 11:05:17 2009 +0000
@@ -446,14 +446,13 @@ static u64 iommu_l2e_from_pfn(struct pag
 int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
 {
     u64 iommu_l2e;
-    unsigned long flags;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
     int iw = IOMMU_IO_WRITE_ENABLED;
     int ir = IOMMU_IO_READ_ENABLED;
 
     BUG_ON( !hd->root_table );
 
-    spin_lock_irqsave(&hd->mapping_lock, flags);
+    spin_lock(&hd->mapping_lock);
 
     if ( is_hvm_domain(d) && !hd->p2m_synchronized )
         goto out;
@@ -461,14 +460,14 @@ int amd_iommu_map_page(struct domain *d,
     iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
     if ( iommu_l2e == 0 )
     {
-        spin_unlock_irqrestore(&hd->mapping_lock, flags);
+        spin_unlock(&hd->mapping_lock);
         amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
         return -EFAULT;
     }
     set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
 
 out:
-    spin_unlock_irqrestore(&hd->mapping_lock, flags);
+    spin_unlock(&hd->mapping_lock);
     return 0;
 }
 
@@ -481,11 +480,11 @@ int amd_iommu_unmap_page(struct domain *
 
     BUG_ON( !hd->root_table );
 
-    spin_lock_irqsave(&hd->mapping_lock, flags);
+    spin_lock(&hd->mapping_lock);
 
     if ( is_hvm_domain(d) && !hd->p2m_synchronized )
     {
-        spin_unlock_irqrestore(&hd->mapping_lock, flags);
+        spin_unlock(&hd->mapping_lock);
         return 0;
     }
 
@@ -493,14 +492,14 @@ int amd_iommu_unmap_page(struct domain *
 
     if ( iommu_l2e == 0 )
     {
-        spin_unlock_irqrestore(&hd->mapping_lock, flags);
+        spin_unlock(&hd->mapping_lock);
         amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
         return -EFAULT;
     }
 
     /* mark PTE as 'page not present' */
     clear_iommu_l1e_present(iommu_l2e, gfn);
-    spin_unlock_irqrestore(&hd->mapping_lock, flags);
+    spin_unlock(&hd->mapping_lock);
 
     /* send INVALIDATE_IOMMU_PAGES command */
     for_each_amd_iommu ( iommu )
@@ -520,12 +519,12 @@ int amd_iommu_reserve_domain_unity_map(
     unsigned long size, int iw, int ir)
 {
     u64 iommu_l2e;
-    unsigned long flags, npages, i;
+    unsigned long npages, i;
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
 
     npages = region_to_pages(phys_addr, size);
 
-    spin_lock_irqsave(&hd->mapping_lock, flags);
+    spin_lock(&hd->mapping_lock);
     for ( i = 0; i < npages; ++i )
     {
         iommu_l2e = iommu_l2e_from_pfn(
@@ -533,7 +532,7 @@ int amd_iommu_reserve_domain_unity_map(
 
         if ( iommu_l2e == 0 )
         {
-            spin_unlock_irqrestore(&hd->mapping_lock, flags);
+            spin_unlock(&hd->mapping_lock);
             amd_iov_error("Invalid IO pagetable entry phys_addr = %lx\n",
                           phys_addr);
             return -EFAULT;
@@ -544,13 +543,13 @@ int amd_iommu_reserve_domain_unity_map(
 
         phys_addr += PAGE_SIZE;
     }
-    spin_unlock_irqrestore(&hd->mapping_lock, flags);
+    spin_unlock(&hd->mapping_lock);
     return 0;
 }
 
 int amd_iommu_sync_p2m(struct domain *d)
 {
-    unsigned long mfn, gfn, flags;
+    unsigned long mfn, gfn;
     u64 iommu_l2e;
     struct page_info *page;
     struct hvm_iommu *hd;
@@ -562,7 +561,7 @@ int amd_iommu_sync_p2m(struct domain *d)
 
     hd = domain_hvm_iommu(d);
 
-    spin_lock_irqsave(&hd->mapping_lock, flags);
+    spin_lock(&hd->mapping_lock);
 
     if ( hd->p2m_synchronized )
         goto out;
@@ -582,7 +581,7 @@ int amd_iommu_sync_p2m(struct domain *d)
         if ( iommu_l2e == 0 )
         {
             spin_unlock(&d->page_alloc_lock);
-            spin_unlock_irqrestore(&hd->mapping_lock, flags);
+            spin_unlock(&hd->mapping_lock);
             amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
             return -EFAULT;
         }
@@ -595,7 +594,7 @@ int amd_iommu_sync_p2m(struct domain *d)
     hd->p2m_synchronized = 1;
 
 out:
-    spin_unlock_irqrestore(&hd->mapping_lock, flags);
+    spin_unlock(&hd->mapping_lock);
     return 0;
 }
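
The mapping_lock conversion is the core of the patch. hd->mapping_lock is
never taken from interrupt context, so the _irqsave variants only served
to disable interrupts needlessly. They were also actively harmful for
check_lock(): amd_iommu_sync_p2m() nests d->page_alloc_lock inside
mapping_lock, so the irqsave outer lock meant page_alloc_lock was acquired
here with interrupts off while the rest of Xen presumably takes it with
interrupts on, exactly the inconsistency check_lock() flags. A simplified
sketch of the old, offending shape:

    /* Before this change (simplified): */
    spin_lock_irqsave(&hd->mapping_lock, flags);  /* IRQs now off */
    spin_lock(&d->page_alloc_lock);     /* seen with IRQs off here,
                                           with IRQs on elsewhere:
                                           check_lock() BUG()s */
    spin_unlock(&d->page_alloc_lock);
    spin_unlock_irqrestore(&hd->mapping_lock, flags);
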
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Thu Feb 19 11:07:33 2009 +0000
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Fri Feb 20 11:05:17 2009 +0000
@@ -172,22 +172,18 @@ int amd_iov_detect(void)
 static int allocate_domain_resources(struct hvm_iommu *hd)
 {
     /* allocate root table */
-    unsigned long flags;
-
-    spin_lock_irqsave(&hd->mapping_lock, flags);
+    spin_lock(&hd->mapping_lock);
     if ( !hd->root_table )
     {
         hd->root_table = alloc_amd_iommu_pgtable();
         if ( !hd->root_table )
-            goto error_out;
+        {
+            spin_unlock(&hd->mapping_lock);
+            return -ENOMEM;
+        }
     }
-    spin_unlock_irqrestore(&hd->mapping_lock, flags);
-
+    spin_unlock(&hd->mapping_lock);
     return 0;
-
- error_out:
-    spin_unlock_irqrestore(&hd->mapping_lock, flags);
-    return -ENOMEM;
 }
 
 static int get_paging_mode(unsigned long entries)
@@ -298,7 +294,6 @@ static int reassign_device( struct domai
                  bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                  source->domain_id, target->domain_id);
 
-    spin_unlock(&pcidevs_lock);
     return 0;
 }
 
@@ -352,11 +347,13 @@ static void deallocate_iommu_page_tables
 {
     struct hvm_iommu *hd  = domain_hvm_iommu(d);
 
+    spin_lock(&hd->mapping_lock);
     if ( hd->root_table )
     {
         deallocate_next_page_table(hd->root_table, hd->paging_mode);
         hd->root_table = NULL;
     }
+    spin_unlock(&hd->mapping_lock);
 }
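
Two distinct fixes in pci_amd_iommu.c: reassign_device() used to drop
pcidevs_lock on its success path even though it never acquired that lock
(its caller is expected to hold it), so the unbalanced unlock is deleted;
and deallocate_iommu_page_tables() now takes mapping_lock so that tearing
down the root table cannot race a concurrent amd_iommu_map_page() or
amd_iommu_unmap_page(). A hypothetical caller-side view of the
reassign_device() contract (this call site is an assumption for
illustration, not the actual code):

    spin_lock(&pcidevs_lock);
    /* reassign_device() must leave pcidevs_lock exactly as it found
     * it; releasing the caller's lock inside would underflow it. */
    ret = reassign_device(source, target, bus, devfn);
    spin_unlock(&pcidevs_lock);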