ia64/xen-unstable

changeset 19753:cc07094a02e4

vtd: Clean up lock for VT-d register writes

The register_lock should be held when writing VT-d registers. Currently
some register writes are performed without holding the lock. This patch
adds register_lock protection around those writes.

Signed-off-by: Weidong Han <weidong.han@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jun 16 11:31:20 2009 +0100 (2009-06-16)
parents fa51db0871e1
children a5f584c1e2f6
files xen/drivers/passthrough/vtd/intremap.c xen/drivers/passthrough/vtd/iommu.c xen/drivers/passthrough/vtd/qinval.c xen/drivers/passthrough/vtd/utils.c
line diff
     1.1 --- a/xen/drivers/passthrough/vtd/intremap.c	Tue Jun 16 11:30:45 2009 +0100
     1.2 +++ b/xen/drivers/passthrough/vtd/intremap.c	Tue Jun 16 11:31:20 2009 +0100
     1.3 @@ -535,6 +535,7 @@ int enable_intremap(struct iommu *iommu)
     1.4  {
     1.5      struct ir_ctrl *ir_ctrl;
     1.6      u32 sts, gcmd;
     1.7 +    unsigned long flags;
     1.8  
     1.9      ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
    1.10  
    1.11 @@ -556,6 +557,8 @@ int enable_intremap(struct iommu *iommu)
    1.12      ir_ctrl->iremap_maddr |=
    1.13              ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIME_SHIFT) : 0;
    1.14  #endif
    1.15 +    spin_lock_irqsave(&iommu->register_lock, flags);
    1.16 +
    1.17      /* set size of the interrupt remapping table */
    1.18      ir_ctrl->iremap_maddr |= IRTA_REG_TABLE_SIZE;
    1.19      dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr);
    1.20 @@ -567,10 +570,12 @@ int enable_intremap(struct iommu *iommu)
    1.21  
    1.22      IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
    1.23                    (sts & DMA_GSTS_SIRTPS), sts);
    1.24 - 
    1.25 +    spin_unlock_irqrestore(&iommu->register_lock, flags);
    1.26 +
    1.27      /* After set SIRTP, must globally invalidate the interrupt entry cache */
    1.28      iommu_flush_iec_global(iommu);
    1.29  
    1.30 +    spin_lock_irqsave(&iommu->register_lock, flags);
    1.31 +    /* enable compatibility format interrupt pass through */
    1.32      gcmd |= DMA_GCMD_CFI;
    1.33      dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);
    1.34 @@ -584,6 +589,7 @@ int enable_intremap(struct iommu *iommu)
    1.35  
    1.36      IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
    1.37                    (sts & DMA_GSTS_IRES), sts);
    1.38 +    spin_unlock_irqrestore(&iommu->register_lock, flags);
    1.39  
    1.40      return init_apic_pin_2_ir_idx();
    1.41  }
    1.42 @@ -591,12 +597,15 @@ int enable_intremap(struct iommu *iommu)
    1.43  void disable_intremap(struct iommu *iommu)
    1.44  {
    1.45      u32 sts;
    1.46 +    unsigned long flags;
    1.47  
    1.48      ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
    1.49  
    1.50 +    spin_lock_irqsave(&iommu->register_lock, flags);
    1.51      sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
    1.52      dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_IRE));
    1.53  
    1.54      IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
    1.55                    !(sts & DMA_GSTS_IRES), sts);
    1.56 +    spin_unlock_irqrestore(&iommu->register_lock, flags);
    1.57  }
     2.1 --- a/xen/drivers/passthrough/vtd/iommu.c	Tue Jun 16 11:30:45 2009 +0100
     2.2 +++ b/xen/drivers/passthrough/vtd/iommu.c	Tue Jun 16 11:31:20 2009 +0100
     2.3 @@ -229,12 +229,12 @@ static u64 addr_to_dma_page_maddr(struct
     2.4  static void iommu_flush_write_buffer(struct iommu *iommu)
     2.5  {
     2.6      u32 val;
     2.7 -    unsigned long flag;
     2.8 +    unsigned long flags;
     2.9  
    2.10      if ( !rwbf_quirk && !cap_rwbf(iommu->cap) )
    2.11          return;
    2.12  
    2.13 -    spin_lock_irqsave(&iommu->register_lock, flag);
    2.14 +    spin_lock_irqsave(&iommu->register_lock, flags);
    2.15      val = dmar_readl(iommu->reg, DMAR_GSTS_REG);
    2.16      dmar_writel(iommu->reg, DMAR_GCMD_REG, val | DMA_GCMD_WBF);
    2.17  
    2.18 @@ -242,7 +242,7 @@ static void iommu_flush_write_buffer(str
    2.19      IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
    2.20                    !(val & DMA_GSTS_WBFS), val);
    2.21  
    2.22 -    spin_unlock_irqrestore(&iommu->register_lock, flag);
    2.23 +    spin_unlock_irqrestore(&iommu->register_lock, flags);
    2.24  }
    2.25  
    2.26  /* return value determine if we need a write buffer flush */
    2.27 @@ -253,7 +253,7 @@ static int flush_context_reg(
    2.28  {
    2.29      struct iommu *iommu = (struct iommu *) _iommu;
    2.30      u64 val = 0;
    2.31 -    unsigned long flag;
    2.32 +    unsigned long flags;
    2.33  
    2.34      /*
    2.35       * In the non-present entry flush case, if hardware doesn't cache
    2.36 @@ -287,14 +287,14 @@ static int flush_context_reg(
    2.37      }
    2.38      val |= DMA_CCMD_ICC;
    2.39  
    2.40 -    spin_lock_irqsave(&iommu->register_lock, flag);
    2.41 +    spin_lock_irqsave(&iommu->register_lock, flags);
    2.42      dmar_writeq(iommu->reg, DMAR_CCMD_REG, val);
    2.43  
    2.44      /* Make sure hardware complete it */
    2.45      IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, dmar_readq,
    2.46                    !(val & DMA_CCMD_ICC), val);
    2.47  
    2.48 -    spin_unlock_irqrestore(&iommu->register_lock, flag);
    2.49 +    spin_unlock_irqrestore(&iommu->register_lock, flags);
    2.50      /* flush context entry will implicitly flush write buffer */
    2.51      return 0;
    2.52  }
    2.53 @@ -333,7 +333,7 @@ static int flush_iotlb_reg(void *_iommu,
    2.54      struct iommu *iommu = (struct iommu *) _iommu;
    2.55      int tlb_offset = ecap_iotlb_offset(iommu->ecap);
    2.56      u64 val = 0, val_iva = 0;
    2.57 -    unsigned long flag;
    2.58 +    unsigned long flags;
    2.59  
    2.60      /*
    2.61       * In the non-present entry flush case, if hardware doesn't cache
    2.62 @@ -373,7 +373,7 @@ static int flush_iotlb_reg(void *_iommu,
    2.63      if ( cap_write_drain(iommu->cap) )
    2.64          val |= DMA_TLB_WRITE_DRAIN;
    2.65  
    2.66 -    spin_lock_irqsave(&iommu->register_lock, flag);
    2.67 +    spin_lock_irqsave(&iommu->register_lock, flags);
    2.68      /* Note: Only uses first TLB reg currently */
    2.69      if ( val_iva )
    2.70          dmar_writeq(iommu->reg, tlb_offset, val_iva);
    2.71 @@ -382,7 +382,7 @@ static int flush_iotlb_reg(void *_iommu,
    2.72      /* Make sure hardware complete it */
    2.73      IOMMU_WAIT_OP(iommu, (tlb_offset + 8), dmar_readq,
    2.74                    !(val & DMA_TLB_IVT), val);
    2.75 -    spin_unlock_irqrestore(&iommu->register_lock, flag);
    2.76 +    spin_unlock_irqrestore(&iommu->register_lock, flags);
    2.77  
    2.78      /* check IOTLB invalidation granularity */
    2.79      if ( DMA_TLB_IAIG(val) == 0 )
    2.80 @@ -590,10 +590,10 @@ static void iommu_enable_translation(str
    2.81      /* Make sure hardware complete it */
    2.82      IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
    2.83                    (sts & DMA_GSTS_TES), sts);
    2.84 +    spin_unlock_irqrestore(&iommu->register_lock, flags);
    2.85  
    2.86      /* Disable PMRs when VT-d engine takes effect per spec definition */
    2.87      disable_pmr(iommu);
    2.88 -    spin_unlock_irqrestore(&iommu->register_lock, flags);
    2.89  }
    2.90  
    2.91  static void iommu_disable_translation(struct iommu *iommu)
    2.92 @@ -1617,7 +1617,9 @@ static void setup_dom0_devices(struct do
    2.93  void clear_fault_bits(struct iommu *iommu)
    2.94  {
    2.95      u64 val;
    2.96 +    unsigned long flags;
    2.97  
    2.98 +    spin_lock_irqsave(&iommu->register_lock, flags);
    2.99      val = dmar_readq(
   2.100          iommu->reg,
   2.101          cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+0x8);
   2.102 @@ -1626,6 +1628,7 @@ void clear_fault_bits(struct iommu *iomm
   2.103          cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+8,
   2.104          val);
   2.105      dmar_writel(iommu->reg, DMAR_FSTS_REG, DMA_FSTS_FAULTS);
   2.106 +    spin_unlock_irqrestore(&iommu->register_lock, flags);
   2.107  }
   2.108  
   2.109  static int init_vtd_hw(void)
   2.110 @@ -1635,6 +1638,7 @@ static int init_vtd_hw(void)
   2.111      struct iommu_flush *flush = NULL;
   2.112      int vector;
   2.113      int ret;
   2.114 +    unsigned long flags;
   2.115  
   2.116      for_each_drhd_unit ( drhd )
   2.117      {
   2.118 @@ -1652,7 +1656,10 @@ static int init_vtd_hw(void)
   2.119          dma_msi_data_init(iommu, iommu->vector);
   2.120          dma_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map)));
   2.121          clear_fault_bits(iommu);
   2.122 +
   2.123 +        spin_lock_irqsave(&iommu->register_lock, flags);
   2.124          dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
   2.125 +        spin_unlock_irqrestore(&iommu->register_lock, flags);
   2.126  
   2.127          /* initialize flush functions */
   2.128          flush = iommu_get_flush(iommu);
   2.129 @@ -1942,6 +1949,7 @@ void iommu_resume(void)
   2.130      struct acpi_drhd_unit *drhd;
   2.131      struct iommu *iommu;
   2.132      u32 i;
   2.133 +    unsigned long flags;
   2.134  
   2.135      if ( !iommu_enabled )
   2.136          return;
   2.137 @@ -1954,6 +1962,7 @@ void iommu_resume(void)
   2.138          iommu = drhd->iommu;
   2.139          i = iommu->index;
   2.140  
   2.141 +        spin_lock_irqsave(&iommu->register_lock, flags);
   2.142          dmar_writel(iommu->reg, DMAR_FECTL_REG,
   2.143                      (u32) iommu_state[i][DMAR_FECTL_REG]);
   2.144          dmar_writel(iommu->reg, DMAR_FEDATA_REG,
   2.145 @@ -1962,6 +1971,7 @@ void iommu_resume(void)
   2.146                      (u32) iommu_state[i][DMAR_FEADDR_REG]);
   2.147          dmar_writel(iommu->reg, DMAR_FEUADDR_REG,
   2.148                      (u32) iommu_state[i][DMAR_FEUADDR_REG]);
   2.149 +        spin_unlock_irqrestore(&iommu->register_lock, flags);
   2.150  
   2.151          iommu_enable_translation(iommu);
   2.152      }
     3.1 --- a/xen/drivers/passthrough/vtd/qinval.c	Tue Jun 16 11:30:45 2009 +0100
     3.2 +++ b/xen/drivers/passthrough/vtd/qinval.c	Tue Jun 16 11:31:20 2009 +0100
     3.3 @@ -421,6 +421,7 @@ int enable_qinval(struct iommu *iommu)
     3.4      struct qi_ctrl *qi_ctrl;
     3.5      struct iommu_flush *flush;
     3.6      u32 sts;
     3.7 +    unsigned long flags;
     3.8  
     3.9      qi_ctrl = iommu_qi_ctrl(iommu);
    3.10      flush = iommu_get_flush(iommu);
    3.11 @@ -449,6 +450,8 @@ int enable_qinval(struct iommu *iommu)
    3.12       * to IQA register.
    3.13       */
    3.14      qi_ctrl->qinval_maddr |= IQA_REG_QS;
    3.15 +
    3.16 +    spin_lock_irqsave(&iommu->register_lock, flags);
    3.17      dmar_writeq(iommu->reg, DMAR_IQA_REG, qi_ctrl->qinval_maddr);
    3.18  
    3.19      dmar_writeq(iommu->reg, DMAR_IQT_REG, 0);
    3.20 @@ -460,6 +463,7 @@ int enable_qinval(struct iommu *iommu)
    3.21      /* Make sure hardware complete it */
    3.22      IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
    3.23                    (sts & DMA_GSTS_QIES), sts);
    3.24 +    spin_unlock_irqrestore(&iommu->register_lock, flags);
    3.25  
    3.26      qinval_enabled = 1;
    3.27      return 0;
    3.28 @@ -468,13 +472,16 @@ int enable_qinval(struct iommu *iommu)
    3.29  void disable_qinval(struct iommu *iommu)
    3.30  {
    3.31      u32 sts;
    3.32 +    unsigned long flags;
    3.33  
    3.34      ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);
    3.35  
    3.36 +    spin_lock_irqsave(&iommu->register_lock, flags);
    3.37      sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
    3.38      dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_QIE));
    3.39  
    3.40      /* Make sure hardware complete it */
    3.41      IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
    3.42                    !(sts & DMA_GSTS_QIES), sts);
    3.43 +    spin_unlock_irqrestore(&iommu->register_lock, flags);
    3.44  }
     4.1 --- a/xen/drivers/passthrough/vtd/utils.c	Tue Jun 16 11:30:45 2009 +0100
     4.2 +++ b/xen/drivers/passthrough/vtd/utils.c	Tue Jun 16 11:31:20 2009 +0100
     4.3 @@ -39,15 +39,18 @@ int is_usb_device(u8 bus, u8 devfn)
     4.4  void disable_pmr(struct iommu *iommu)
     4.5  {
     4.6      u32 val;
     4.7 +    unsigned long flags;
     4.8  
     4.9      val = dmar_readl(iommu->reg, DMAR_PMEN_REG);
    4.10      if ( !(val & DMA_PMEN_PRS) )
    4.11          return;
    4.12  
    4.13 +    spin_lock_irqsave(&iommu->register_lock, flags);
    4.14      dmar_writel(iommu->reg, DMAR_PMEN_REG, val & ~DMA_PMEN_EPM);
    4.15  
    4.16      IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, dmar_readl,
    4.17                    !(val & DMA_PMEN_PRS), val);
    4.18 +    spin_unlock_irqrestore(&iommu->register_lock, flags);
    4.19  
    4.20      dprintk(XENLOG_INFO VTDPREFIX,
    4.21              "Disabled protected memory registers\n");