ia64/xen-unstable
changeset 19733:a69daf23602a
VT-d: define a macro for waiting on hardware completion
When setting certain VT-d registers, software must wait for the hardware
to complete the operation. There is a lot of duplicated code doing this;
this patch defines a macro for it, which makes the code much cleaner.
Signed-off-by: Weidong Han <weidong.han@intel.com>
| author | Keir Fraser <keir.fraser@citrix.com> |
|---|---|
| date | Fri Jun 05 09:27:18 2009 +0100 (2009-06-05) |
| parents | 931dbe86e5f3 |
| children | 4fb8a6c993e2 |
| files | xen/drivers/passthrough/vtd/dmar.h xen/drivers/passthrough/vtd/intremap.c xen/drivers/passthrough/vtd/iommu.c xen/drivers/passthrough/vtd/qinval.c xen/drivers/passthrough/vtd/utils.c |
line diff
--- a/xen/drivers/passthrough/vtd/dmar.h	Fri Jun 05 09:26:39 2009 +0100
+++ b/xen/drivers/passthrough/vtd/dmar.h	Fri Jun 05 09:27:18 2009 +0100
@@ -90,6 +90,20 @@ void dmar_scope_remove_buses(struct dmar
 
 #define DMAR_OPERATION_TIMEOUT MILLISECS(1000)
 
+#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
+do {                                                \
+    s_time_t start_time = NOW();                    \
+    while (1) {                                     \
+        sts = op(iommu->reg, offset);               \
+        if ( cond )                                 \
+            break;                                  \
+        if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )       \
+            panic("%s:%d:%s: DMAR hardware is malfunctional\n",  \
+                  __FILE__, __LINE__, __func__);    \
+        cpu_relax();                                \
+    }                                               \
+} while (0)
+
 int vtd_hw_check(void);
 void disable_pmr(struct iommu *iommu);
 int is_usb_device(u8 bus, u8 devfn);
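Outside the patch context, the shape of IOMMU_WAIT_OP can be illustrated with a small user-space sketch. This is not Xen code: WAIT_OP, OP_TIMEOUT_NS, now_ns(), read_status() and fake_status are hypothetical stand-ins for IOMMU_WAIT_OP, DMAR_OPERATION_TIMEOUT, NOW(), dmar_readl() and the DMAR status register, used only to show how a caller supplies the read accessor, the completion condition and the variable that receives the register value.

```c
/*
 * Minimal, self-contained sketch (not Xen code) of the poll-with-timeout
 * pattern captured by IOMMU_WAIT_OP.  All names below are hypothetical
 * stand-ins for the hypervisor primitives.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define OP_TIMEOUT_NS (1000ULL * 1000 * 1000)   /* 1s, like MILLISECS(1000) */

static volatile uint32_t fake_status;           /* plays the DMAR_GSTS_REG role */

static uint64_t now_ns(void)                    /* plays the NOW() role */
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static uint32_t read_status(void)               /* plays the dmar_readl() role */
{
    return fake_status;
}

/* Same shape as IOMMU_WAIT_OP: re-read until 'cond' holds or we time out. */
#define WAIT_OP(op, cond, sts)                                          \
do {                                                                    \
    uint64_t start = now_ns();                                          \
    for ( ; ; )                                                         \
    {                                                                   \
        sts = op();                                                     \
        if ( cond )                                                     \
            break;                                                      \
        if ( now_ns() > start + OP_TIMEOUT_NS )                         \
        {                                                               \
            fprintf(stderr, "%s:%d: hardware did not respond\n",        \
                    __FILE__, __LINE__);                                \
            exit(EXIT_FAILURE);                                         \
        }                                                               \
    }                                                                   \
} while (0)

int main(void)
{
    uint32_t sts;

    fake_status = 0x1;                          /* pretend the bit is already set */
    WAIT_OP(read_status, (sts & 0x1), sts);     /* completes immediately */
    printf("status observed: 0x%x\n", (unsigned)sts);
    return 0;
}
```

The design point mirrored here is that each call site states only which register to poll and which condition signals completion; the timeout and failure handling live in one place.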
--- a/xen/drivers/passthrough/vtd/intremap.c	Fri Jun 05 09:26:39 2009 +0100
+++ b/xen/drivers/passthrough/vtd/intremap.c	Fri Jun 05 09:27:18 2009 +0100
@@ -534,7 +534,7 @@ void msi_msg_write_remap_rte(
 int enable_intremap(struct iommu *iommu)
 {
     struct ir_ctrl *ir_ctrl;
-    s_time_t start_time;
+    u32 sts;
 
     ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
 
@@ -564,38 +564,22 @@ int enable_intremap(struct iommu *iommu)
     iommu->gcmd |= DMA_GCMD_SIRTP;
     dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
 
-    /* Make sure hardware complete it */
-    start_time = NOW();
-    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_SIRTPS) )
-    {
-        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
-            panic("Cannot set SIRTP field for interrupt remapping\n");
-        cpu_relax();
-    }
-
+    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+                  (sts & DMA_GSTS_SIRTPS), sts);
+
     /* enable comaptiblity format interrupt pass through */
     iommu->gcmd |= DMA_GCMD_CFI;
     dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
 
-    start_time = NOW();
-    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_CFIS) )
-    {
-        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
-            panic("Cannot set CFI field for interrupt remapping\n");
-        cpu_relax();
-    }
+    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+                  (sts & DMA_GSTS_CFIS), sts);
 
     /* enable interrupt remapping hardware */
     iommu->gcmd |= DMA_GCMD_IRE;
     dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
 
-    start_time = NOW();
-    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_IRES) )
-    {
-        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
-            panic("Cannot set IRE field for interrupt remapping\n");
-        cpu_relax();
-    }
+    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+                  (sts & DMA_GSTS_IRES), sts);
 
     /* After set SIRTP, we should do globally invalidate the IEC */
     iommu_flush_iec_global(iommu);
@@ -605,18 +589,13 @@ int enable_intremap(struct iommu *iommu)
 
 void disable_intremap(struct iommu *iommu)
 {
-    s_time_t start_time;
+    u32 sts;
 
     ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
 
     iommu->gcmd &= ~(DMA_GCMD_SIRTP | DMA_GCMD_CFI | DMA_GCMD_IRE);
     dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
 
-    start_time = NOW();
-    while ( dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_IRES )
-    {
-        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
-            panic("Cannot clear IRE field for interrupt remapping\n");
-        cpu_relax();
-    }
+    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+                  !(sts & DMA_GSTS_IRES), sts);
 }
--- a/xen/drivers/passthrough/vtd/iommu.c	Fri Jun 05 09:26:39 2009 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c	Fri Jun 05 09:27:18 2009 +0100
@@ -230,7 +230,6 @@ static void iommu_flush_write_buffer(str
 {
     u32 val;
     unsigned long flag;
-    s_time_t start_time;
 
     if ( !rwbf_quirk && !cap_rwbf(iommu->cap) )
         return;
@@ -240,17 +239,9 @@ static void iommu_flush_write_buffer(str
     dmar_writel(iommu->reg, DMAR_GCMD_REG, val);
 
     /* Make sure hardware complete it */
-    start_time = NOW();
-    for ( ; ; )
-    {
-        val = dmar_readl(iommu->reg, DMAR_GSTS_REG);
-        if ( !(val & DMA_GSTS_WBFS) )
-            break;
-        if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
-            panic("%s: DMAR hardware is malfunctional,"
-                  " please disable IOMMU\n", __func__);
-        cpu_relax();
-    }
+    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+                  !(val & DMA_GSTS_WBFS), val);
+
     spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
@@ -263,7 +254,6 @@ static int flush_context_reg(
     struct iommu *iommu = (struct iommu *) _iommu;
     u64 val = 0;
     unsigned long flag;
-    s_time_t start_time;
 
     /*
      * In the non-present entry flush case, if hardware doesn't cache
@@ -301,17 +291,9 @@ static int flush_context_reg(
     dmar_writeq(iommu->reg, DMAR_CCMD_REG, val);
 
     /* Make sure hardware complete it */
-    start_time = NOW();
-    for ( ; ; )
-    {
-        val = dmar_readq(iommu->reg, DMAR_CCMD_REG);
-        if ( !(val & DMA_CCMD_ICC) )
-            break;
-        if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
-            panic("%s: DMAR hardware is malfunctional,"
-                  " please disable IOMMU\n", __func__);
-        cpu_relax();
-    }
+    IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, dmar_readq,
+                  !(val & DMA_CCMD_ICC), val);
+
     spin_unlock_irqrestore(&iommu->register_lock, flag);
     /* flush context entry will implicitly flush write buffer */
     return 0;
@@ -352,7 +334,6 @@ static int flush_iotlb_reg(void *_iommu,
     int tlb_offset = ecap_iotlb_offset(iommu->ecap);
     u64 val = 0, val_iva = 0;
     unsigned long flag;
-    s_time_t start_time;
 
     /*
      * In the non-present entry flush case, if hardware doesn't cache
@@ -399,17 +380,8 @@ static int flush_iotlb_reg(void *_iommu,
     dmar_writeq(iommu->reg, tlb_offset + 8, val);
 
     /* Make sure hardware complete it */
-    start_time = NOW();
-    for ( ; ; )
-    {
-        val = dmar_readq(iommu->reg, tlb_offset + 8);
-        if ( !(val & DMA_TLB_IVT) )
-            break;
-        if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
-            panic("%s: DMAR hardware is malfunctional,"
-                  " please disable IOMMU\n", __func__);
-        cpu_relax();
-    }
+    IOMMU_WAIT_OP(iommu, (tlb_offset + 8), dmar_readq,
+                  !(val & DMA_TLB_IVT), val);
     spin_unlock_irqrestore(&iommu->register_lock, flag);
 
     /* check IOTLB invalidation granularity */
@@ -578,7 +550,6 @@ static int iommu_set_root_entry(struct i
 {
     u32 cmd, sts;
     unsigned long flags;
-    s_time_t start_time;
 
     spin_lock(&iommu->lock);
 
@@ -597,18 +568,8 @@ static int iommu_set_root_entry(struct i
     dmar_writel(iommu->reg, DMAR_GCMD_REG, cmd);
 
     /* Make sure hardware complete it */
-    start_time = NOW();
-    for ( ; ; )
-    {
-        sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
-        if ( sts & DMA_GSTS_RTPS )
-            break;
-        if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
-            panic("%s: DMAR hardware is malfunctional,"
-                  " please disable IOMMU\n", __func__);
-        cpu_relax();
-    }
-
+    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+                  (sts & DMA_GSTS_RTPS), sts);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 
     return 0;
@@ -618,25 +579,16 @@ static void iommu_enable_translation(str
 {
     u32 sts;
     unsigned long flags;
-    s_time_t start_time;
 
     dprintk(XENLOG_INFO VTDPREFIX,
             "iommu_enable_translation: iommu->reg = %p\n", iommu->reg);
     spin_lock_irqsave(&iommu->register_lock, flags);
     iommu->gcmd |= DMA_GCMD_TE;
     dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
+
     /* Make sure hardware complete it */
-    start_time = NOW();
-    for ( ; ; )
-    {
-        sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
-        if ( sts & DMA_GSTS_TES )
-            break;
-        if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
-            panic("%s: DMAR hardware is malfunctional,"
-                  " please disable IOMMU\n", __func__);
-        cpu_relax();
-    }
+    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+                  (sts & DMA_GSTS_TES), sts);
 
     /* Disable PMRs when VT-d engine takes effect per spec definition */
     disable_pmr(iommu);
@@ -647,24 +599,14 @@ static void iommu_disable_translation(st
 {
     u32 sts;
     unsigned long flags;
-    s_time_t start_time;
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     iommu->gcmd &= ~ DMA_GCMD_TE;
     dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
 
     /* Make sure hardware complete it */
-    start_time = NOW();
-    for ( ; ; )
-    {
-        sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
-        if ( !(sts & DMA_GSTS_TES) )
-            break;
-        if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
-            panic("%s: DMAR hardware is malfunctional,"
-                  " please disable IOMMU\n", __func__);
-        cpu_relax();
-    }
+    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+                  !(sts & DMA_GSTS_TES), sts);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
--- a/xen/drivers/passthrough/vtd/qinval.c	Fri Jun 05 09:26:39 2009 +0100
+++ b/xen/drivers/passthrough/vtd/qinval.c	Fri Jun 05 09:27:18 2009 +0100
@@ -418,9 +418,9 @@ static int flush_iotlb_qi(
 
 int enable_qinval(struct iommu *iommu)
 {
-    s_time_t start_time;
     struct qi_ctrl *qi_ctrl;
     struct iommu_flush *flush;
+    u32 sts;
 
     qi_ctrl = iommu_qi_ctrl(iommu);
     flush = iommu_get_flush(iommu);
@@ -458,13 +458,8 @@ int enable_qinval(struct iommu *iommu)
     dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
 
     /* Make sure hardware complete it */
-    start_time = NOW();
-    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_QIES) )
-    {
-        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
-            panic("Cannot set QIE field for queue invalidation\n");
-        cpu_relax();
-    }
+    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+                  (sts & DMA_GSTS_QIES), sts);
 
     qinval_enabled = 1;
     return 0;
@@ -472,7 +467,7 @@ int enable_qinval(struct iommu *iommu)
 
 void disable_qinval(struct iommu *iommu)
 {
-    s_time_t start_time;
+    u32 sts;
 
     ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);
 
@@ -480,11 +475,6 @@ void disable_qinval(struct iommu *iommu)
     dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
 
     /* Make sure hardware complete it */
-    start_time = NOW();
-    while ( dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_QIES )
-    {
-        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
-            panic("Cannot clear QIE field for queue invalidation\n");
-        cpu_relax();
-    }
+    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+                  !(sts & DMA_GSTS_QIES), sts);
 }
--- a/xen/drivers/passthrough/vtd/utils.c	Fri Jun 05 09:26:39 2009 +0100
+++ b/xen/drivers/passthrough/vtd/utils.c	Fri Jun 05 09:27:18 2009 +0100
@@ -38,27 +38,16 @@ int is_usb_device(u8 bus, u8 devfn)
 /* Disable vt-d protected memory registers. */
 void disable_pmr(struct iommu *iommu)
 {
-    s_time_t start_time;
-    unsigned int val;
+    u32 val;
 
     val = dmar_readl(iommu->reg, DMAR_PMEN_REG);
     if ( !(val & DMA_PMEN_PRS) )
         return;
 
     dmar_writel(iommu->reg, DMAR_PMEN_REG, val & ~DMA_PMEN_EPM);
-    start_time = NOW();
 
-    for ( ; ; )
-    {
-        val = dmar_readl(iommu->reg, DMAR_PMEN_REG);
-        if ( (val & DMA_PMEN_PRS) == 0 )
-            break;
-
-        if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
-            panic("Disable PMRs timeout\n");
-
-        cpu_relax();
-    }
+    IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, dmar_readl,
+                  !(val & DMA_PMEN_PRS), val);
 
     dprintk(XENLOG_INFO VTDPREFIX,
             "Disabled protected memory registers\n");