}q;
};
-/* Order of queue invalidation pages(max is 8) */
-#define QINVAL_PAGE_ORDER 2
-
-#define QINVAL_ARCH_PAGE_ORDER (QINVAL_PAGE_ORDER + PAGE_SHIFT_4K - PAGE_SHIFT)
-#define QINVAL_ARCH_PAGE_NR ( QINVAL_ARCH_PAGE_ORDER < 0 ? \
- 1 : \
- 1 << QINVAL_ARCH_PAGE_ORDER )
-
/* Each entry is 16 bytes, so 2^8 entries per page */
#define QINVAL_ENTRY_ORDER ( PAGE_SHIFT - 4 )
-#define QINVAL_ENTRY_NR (1 << (QINVAL_PAGE_ORDER + 8))
+#define QINVAL_MAX_ENTRY_NR (1u << (7 + QINVAL_ENTRY_ORDER))
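/*
 * Worked example (illustrative only, not part of this change; assumes 4K
 * pages, i.e. PAGE_SHIFT == 12):
 *   QINVAL_ENTRY_ORDER  = 12 - 4 = 8      -> 256 16-byte entries per page
 *   QINVAL_MAX_ENTRY_NR = 1u << (7 + 8)   -> 32768 entries
 * The "7" reflects the 3-bit QS field of the IQA register, which can request
 * at most 2^7 pages for the queue.
 */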
/* Status data flag */
#define QINVAL_STAT_INIT 0
#define VTD_QI_TIMEOUT 1
+static unsigned int __read_mostly qi_pg_order;
+static unsigned int __read_mostly qi_entry_nr;
+
static int __must_check invalidate_sync(struct iommu *iommu);
static void print_qi_regs(struct iommu *iommu)
tail >>= QINVAL_INDEX_SHIFT;
/* (tail+1 == head) indicates a full queue, wait for HW */
- while ( ( tail + 1 ) % QINVAL_ENTRY_NR ==
+ while ( ((tail + 1) & (qi_entry_nr - 1)) ==
( dmar_readq(iommu->reg, DMAR_IQH_REG) >> QINVAL_INDEX_SHIFT ) )
cpu_relax();
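/*
 * Illustrative only (not part of this change): the mask form works because
 * qi_entry_nr is always a power of two. E.g. with an assumed qi_entry_nr of
 * 512, tail == 4 and head == 5, (4 + 1) & 511 == 5 == head, so the ring is
 * full and we spin until hardware consumes an entry.
 */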
/* Need to hold the register lock when updating the tail. */
ASSERT( spin_is_locked(&iommu->register_lock) );
- val = (index + 1) % QINVAL_ENTRY_NR;
+ val = (index + 1) & (qi_entry_nr - 1);
dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << QINVAL_INDEX_SHIFT));
}
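/*
 * Illustrative only (not part of this change): advancing the tail with the
 * mask is equivalent to the previous "% qi_entry_nr" while avoiding a
 * division, e.g. with an assumed qi_entry_nr of 512:
 *   (510 + 1) & 511 == 511      and      (511 + 1) & 511 == 0
 */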
if ( qi_ctrl->qinval_maddr == 0 )
{
drhd = iommu_to_drhd(iommu);
- qi_ctrl->qinval_maddr = alloc_pgtable_maddr(drhd, QINVAL_ARCH_PAGE_NR);
+ if ( !qi_entry_nr )
+ {
+ /*
+ * With the present synchronous model, we need two slots for every
+ * operation (the operation itself and a wait descriptor). There
+ * can be one such pair of requests pending per CPU. One extra
+ * entry is needed as the ring is considered full when there's
+ * only one entry left.
+ */
+ BUILD_BUG_ON(CONFIG_NR_CPUS * 2 >= QINVAL_MAX_ENTRY_NR);
+ qi_pg_order = get_order_from_bytes((num_present_cpus() * 2 + 1) <<
+ (PAGE_SHIFT -
+ QINVAL_ENTRY_ORDER));
+ qi_entry_nr = 1u << (qi_pg_order + QINVAL_ENTRY_ORDER);
+
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "QI: using %u-entry ring(s)\n", qi_entry_nr);
+ }
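/*
 * Illustrative sizing (not part of this change; assumes 4K pages and, say,
 * 256 present CPUs): 256 * 2 + 1 = 513 entries of 16 bytes each, i.e. 8208
 * bytes, for which get_order_from_bytes() returns order 2 (four pages).
 * qi_entry_nr then becomes 1u << (2 + 8) = 1024, and the allocation below
 * requests 1024 >> QINVAL_ENTRY_ORDER = 4 pages.
 */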
+
+ qi_ctrl->qinval_maddr =
+ alloc_pgtable_maddr(drhd, qi_entry_nr >> QINVAL_ENTRY_ORDER);
if ( qi_ctrl->qinval_maddr == 0 )
{
dprintk(XENLOG_WARNING VTDPREFIX,
spin_lock_irqsave(&iommu->register_lock, flags);
- /* Setup Invalidation Queue Address(IQA) register with the
- * address of the page we just allocated. QS field at
- * bits[2:0] to indicate size of queue is one 4KB page.
- * That's 256 entries. Queued Head (IQH) and Queue Tail (IQT)
- * registers are automatically reset to 0 with write
- * to IQA register.
+ /*
+ * Set up the Invalidation Queue Address (IQA) register with the address of
+ * the pages we just allocated. The QS field at bits[2:0] indicates the size
+ * (page order) of the queue.
+ *
+ * Queued Head (IQH) and Queue Tail (IQT) registers are automatically
+ * reset to 0 with write to IQA register.
*/
dmar_writeq(iommu->reg, DMAR_IQA_REG,
- qi_ctrl->qinval_maddr | QINVAL_PAGE_ORDER);
+ qi_ctrl->qinval_maddr | qi_pg_order);
dmar_writeq(iommu->reg, DMAR_IQT_REG, 0);
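/*
 * Illustrative only (not part of this change; assumes 4K pages, where Xen's
 * page order and the QS unit coincide): with qi_pg_order == 2 the IQA write
 * above becomes "qinval_maddr | 2", i.e. QS = 2 in bits[2:0], telling the
 * IOMMU the queue spans 2^2 = 4 pages (1024 entries).
 */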