xenbits.xensource.com Git - people/dwmw2/xen.git/commitdiff
passthrough/vtd: Don't DMA to the stack in queue_invalidate_wait() [stable-4.12]
author    Andrew Cooper <andrew.cooper3@citrix.com>
          Fri, 26 Jul 2019 09:02:22 +0000 (11:02 +0200)
committer Jan Beulich <jbeulich@suse.com>
          Fri, 26 Jul 2019 09:02:22 +0000 (11:02 +0200)
DMA-ing to the stack is considered bad practice.  In this case, if a
timeout occurs because of a sluggish device which is processing the
request, the completion notification will corrupt the stack of a
subsequent deeper call tree.
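
To illustrate the hazard, here is a minimal userspace sketch (not Xen
code; every name in it is made up) in which a delayed writer thread
stands in for the sluggish device.  Whether the late write really
lands in a later frame depends on the compiler's stack layout, so
treat it as a model of the failure mode, not a guaranteed reproducer.

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Stand-in for the device: it completes the request after a
     * delay, writing through whatever address it was given. */
    static volatile uint32_t *completion_target;

    static void *slow_device(void *arg)
    {
        (void)arg;
        usleep(100 * 1000);               /* the device is sluggish */
        *completion_target = 1;           /* late completion write */
        return NULL;
    }

    static void wait_with_timeout(void)
    {
        volatile uint32_t poll_slot = 0;  /* DMA target on the stack */

        completion_target = &poll_slot;
        /* "Time out" immediately and return: the device still holds
         * &poll_slot, which now points into a dead stack frame. */
    }

    static void deeper_call_tree(void)
    {
        volatile uint32_t local = 0;      /* may reuse the old frame */

        usleep(200 * 1000);               /* let the device "finish" */
        if ( local != 0 )
            printf("stack corrupted: local = %#x\n", (unsigned)local);
    }

    int main(void)
    {
        pthread_t dev;

        pthread_create(&dev, NULL, slow_device, NULL);
        wait_with_timeout();
        deeper_call_tree();
        pthread_join(dev, NULL);
        return 0;
    }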

Place the poll_slot in a percpu area and DMA to that instead.

Fix the declaration of saddr in struct qinval_entry, to avoid a shift by
two.  The requirement here is that the DMA address is dword aligned,
which is covered by poll_slot's type.
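
The equivalence is easy to check in isolation.  In the old layout,
res_1 (always written as zero) occupies bits 1:0 and saddr occupies
bits 63:2, holding the address shifted right by two; in the new
layout a plain u64 holds the address directly.  For any dword-aligned
address the two produce the same 64-bit word, so the shift can simply
go.  A standalone sketch, using a made-up address:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical dword-aligned machine address of the poll
         * slot; uint32_t alignment keeps the low two bits zero. */
        uint64_t maddr = 0x123456789abcdef0ULL & ~3ULL;

        /* Old layout: res_1:2 in bits 1:0 (zero), saddr:62 in bits
         * 63:2 holding maddr >> 2. */
        uint64_t old_hi = (maddr >> 2) << 2;

        /* New layout: a plain u64 holding maddr directly. */
        uint64_t new_hi = maddr;

        assert(old_hi == new_hi);     /* identical descriptor word */
        return 0;
    }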

This change does not address other issues.  Correlating completions
after a timeout with their request is a more complicated change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <JBeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
master commit: 8970834eb95586d87b064e8c7fc49ee8d2875db4
master date: 2019-07-24 14:40:10 +0100

xen/drivers/passthrough/vtd/iommu.h
xen/drivers/passthrough/vtd/qinval.c

diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
index 1a992f72d6db9284b02208383edf56c720ada5e0..c9290a39961530594953614ee9b81385100466f2 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -444,8 +444,7 @@ struct qinval_entry {
                     sdata   : 32;
             }lo;
             struct {
-                u64 res_1   : 2,
-                    saddr   : 62;
+                u64 saddr;
             }hi;
         }inv_wait_dsc;
     }q;
diff --git a/xen/drivers/passthrough/vtd/qinval.c b/xen/drivers/passthrough/vtd/qinval.c
index 01447cf9a84e52ba4d71e1c48bfb353468bc876f..09cbd36ebb8c9b5df598cb55f7fc9e352841e720 100644
--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -147,13 +147,15 @@ static int __must_check queue_invalidate_wait(struct iommu *iommu,
                                               u8 iflag, u8 sw, u8 fn,
                                               bool_t flush_dev_iotlb)
 {
-    volatile u32 poll_slot = QINVAL_STAT_INIT;
+    static DEFINE_PER_CPU(uint32_t, poll_slot);
     unsigned int index;
     unsigned long flags;
     u64 entry_base;
     struct qinval_entry *qinval_entry, *qinval_entries;
+    uint32_t *this_poll_slot = &this_cpu(poll_slot);
 
     spin_lock_irqsave(&iommu->register_lock, flags);
+    ACCESS_ONCE(*this_poll_slot) = QINVAL_STAT_INIT;
     index = qinval_next_index(iommu);
     entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
                  ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
@@ -166,8 +168,7 @@ static int __must_check queue_invalidate_wait(struct iommu *iommu,
     qinval_entry->q.inv_wait_dsc.lo.fn = fn;
     qinval_entry->q.inv_wait_dsc.lo.res_1 = 0;
     qinval_entry->q.inv_wait_dsc.lo.sdata = QINVAL_STAT_DONE;
-    qinval_entry->q.inv_wait_dsc.hi.res_1 = 0;
-    qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(&poll_slot) >> 2;
+    qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(this_poll_slot);
 
     unmap_vtd_domain_page(qinval_entries);
     qinval_update_qtail(iommu, index);
@@ -182,7 +183,7 @@ static int __must_check queue_invalidate_wait(struct iommu *iommu,
         timeout = NOW() + MILLISECS(flush_dev_iotlb ?
                                     iommu_dev_iotlb_timeout : VTD_QI_TIMEOUT);
 
-        while ( poll_slot != QINVAL_STAT_DONE )
+        while ( ACCESS_ONCE(*this_poll_slot) != QINVAL_STAT_DONE )
         {
             if ( NOW() > timeout )
             {
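
A note on the ACCESS_ONCE() change above: with poll_slot no longer a
volatile local, something must still stop the compiler from caching
the value across iterations of the polling loop.  Xen's ACCESS_ONCE()
does that by forcing each access through a volatile-qualified lvalue;
the definition below is a close approximation (assumption: the exact
macro lives in Xen's headers and is not reproduced here verbatim).

    #include <stdint.h>

    /* Approximation of Xen's ACCESS_ONCE(): every read or write of x
     * becomes a single access through a volatile lvalue, so it cannot
     * be cached in a register or hoisted out of a loop. */
    #define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

    static uint32_t poll_slot;    /* stand-in for the per-CPU slot */

    /* Without ACCESS_ONCE() the compiler may read poll_slot once and
     * spin forever on the cached value; with it, every iteration
     * reloads the location the device DMAs into.  (A real caller, as
     * in the hunk above, also checks a timeout.) */
    void wait_done(uint32_t done_value)
    {
        while ( ACCESS_ONCE(poll_slot) != done_value )
            ;
    }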