if ( need_iommu_pt_sync(p2m->domain) &&
(lpae_is_valid(orig_pte) || lpae_is_valid(*entry)) )
+ {
+ unsigned int flush_flags = 0;
+
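+        /*
+         * A valid old entry is being removed or replaced ('modified');
+         * a valid new entry is being installed ('added').
+         */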
+ if ( lpae_is_valid(orig_pte) )
+ flush_flags |= IOMMU_FLUSHF_modified;
+ if ( lpae_is_valid(*entry) )
+ flush_flags |= IOMMU_FLUSHF_added;
+
rc = iommu_iotlb_flush(p2m->domain, _dfn(gfn_x(sgfn)),
- 1UL << page_order);
+ 1UL << page_order, flush_flags);
+ }
else
rc = 0;
this_cpu(iommu_dont_flush_iotlb) = 0;
- ret = iommu_flush(d, _dfn(xatp->idx - done), done);
+ ret = iommu_iotlb_flush(d, _dfn(xatp->idx - done), done,
+ IOMMU_FLUSHF_added | IOMMU_FLUSHF_modified);
if ( unlikely(ret) && rc >= 0 )
rc = ret;
- ret = iommu_flush(d, _dfn(xatp->gpfn - done), done);
+ ret = iommu_iotlb_flush(d, _dfn(xatp->gpfn - done), done,
+ IOMMU_FLUSHF_added | IOMMU_FLUSHF_modified);
if ( unlikely(ret) && rc >= 0 )
rc = ret;
}
return idx;
}
-static void clear_iommu_pte_present(unsigned long l1_mfn, unsigned long dfn)
+static unsigned int clear_iommu_pte_present(unsigned long l1_mfn,
+ unsigned long dfn)
{
uint64_t *table, *pte;
+ uint32_t entry;
+ unsigned int flush_flags;
table = map_domain_page(_mfn(l1_mfn));
- pte = table + pfn_to_pde_idx(dfn, 1);
+
+ pte = (table + pfn_to_pde_idx(dfn, 1));
+ entry = *pte >> 32;
+
+ flush_flags = get_field_from_reg_u32(entry, IOMMU_PTE_PRESENT_MASK,
+ IOMMU_PTE_PRESENT_SHIFT) ?
+ IOMMU_FLUSHF_modified : 0;
+
*pte = 0;
unmap_domain_page(table);
+
+ return flush_flags;
}
-static bool set_iommu_pde_present(uint32_t *pde, unsigned long next_mfn,
- unsigned int next_level,
- bool iw, bool ir)
+static unsigned int set_iommu_pde_present(uint32_t *pde,
+ unsigned long next_mfn,
+ unsigned int next_level, bool iw,
+ bool ir)
{
uint64_t maddr_next;
uint32_t addr_lo, addr_hi, entry;
- bool need_flush = false, old_present;
+ bool old_present;
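+    /* A present entry is always written, hence the unconditional 'added' */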
+ unsigned int flush_flags = IOMMU_FLUSHF_added;
maddr_next = __pfn_to_paddr(next_mfn);
if ( maddr_old != maddr_next || iw != old_w || ir != old_r ||
old_level != next_level )
- need_flush = true;
+ flush_flags |= IOMMU_FLUSHF_modified;
}
addr_lo = maddr_next & DMA_32BIT_MASK;
IOMMU_PDE_PRESENT_SHIFT, &entry);
pde[0] = entry;
- return need_flush;
+ return flush_flags;
}
-static bool set_iommu_pte_present(unsigned long pt_mfn, unsigned long dfn,
- unsigned long next_mfn, int pde_level,
- bool iw, bool ir)
+static unsigned int set_iommu_pte_present(unsigned long pt_mfn,
+ unsigned long dfn,
+ unsigned long next_mfn,
+ int pde_level,
+ bool iw, bool ir)
{
uint64_t *table;
uint32_t *pde;
- bool need_flush;
+ unsigned int flush_flags;
table = map_domain_page(_mfn(pt_mfn));
pde = (uint32_t *)(table + pfn_to_pde_idx(dfn, pde_level));
- need_flush = set_iommu_pde_present(pde, next_mfn, 0, iw, ir);
+ flush_flags = set_iommu_pde_present(pde, next_mfn, 0, iw, ir);
unmap_domain_page(table);
- return need_flush;
+
+ return flush_flags;
}
void amd_iommu_set_root_page_table(uint32_t *dte, uint64_t root_ptr,
}
int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
- unsigned int flags)
+ unsigned int flags, unsigned int *flush_flags)
{
- bool need_flush;
struct domain_iommu *hd = dom_iommu(d);
int rc;
unsigned long pt_mfn[7];
}
/* Install 4k mapping */
- need_flush = set_iommu_pte_present(pt_mfn[1], dfn_x(dfn), mfn_x(mfn), 1,
- !!(flags & IOMMUF_writable),
- !!(flags & IOMMUF_readable));
-
- if ( need_flush )
- amd_iommu_flush_pages(d, dfn_x(dfn), 0);
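+    /* Flushing is deferred to the caller via the accumulated flush flags */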
+ *flush_flags |= set_iommu_pte_present(pt_mfn[1], dfn_x(dfn), mfn_x(mfn),
+ 1, (flags & IOMMUF_writable),
+ (flags & IOMMUF_readable));
spin_unlock(&hd->arch.mapping_lock);
+
return 0;
}
-int amd_iommu_unmap_page(struct domain *d, dfn_t dfn)
+int amd_iommu_unmap_page(struct domain *d, dfn_t dfn,
+ unsigned int *flush_flags)
{
unsigned long pt_mfn[7];
struct domain_iommu *hd = dom_iommu(d);
}
/* mark PTE as 'page not present' */
- clear_iommu_pte_present(pt_mfn[1], dfn_x(dfn));
+ *flush_flags |= clear_iommu_pte_present(pt_mfn[1], dfn_x(dfn));
spin_unlock(&hd->arch.mapping_lock);
- amd_iommu_flush_pages(d, dfn_x(dfn), 0);
return 0;
}
}
int amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
- unsigned int page_count)
+ unsigned int page_count,
+ unsigned int flush_flags)
{
unsigned long dfn_l = dfn_x(dfn);
ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
+ ASSERT(flush_flags);
+
+ /* Unless a PTE was modified, no flush is required */
+ if ( !(flush_flags & IOMMU_FLUSHF_modified) )
+ return 0;
/* If the range wraps then just flush everything */
if ( dfn_l + page_count < dfn_l )
unsigned long npages, i;
unsigned long gfn;
unsigned int flags = !!ir;
+ unsigned int flush_flags = 0;
int rt = 0;
if ( iw )
{
unsigned long frame = gfn + i;
- rt = amd_iommu_map_page(domain, _dfn(frame), _mfn(frame), flags);
+ rt = amd_iommu_map_page(domain, _dfn(frame), _mfn(frame), flags,
+ &flush_flags);
if ( rt != 0 )
- return rt;
+ break;
}
- return 0;
+
+    /* Use while/break to consume the __must_check result without a warning */
+ while ( flush_flags &&
+ amd_iommu_flush_iotlb_pages(domain, _dfn(gfn),
+ npages, flush_flags) )
+ break;
+
+ return rt;
}
/* Share p2m table with iommu. */
}
static int __must_check arm_smmu_iotlb_flush(struct domain *d, dfn_t dfn,
- unsigned int page_count)
+ unsigned int page_count,
+ unsigned int flush_flags)
{
+ ASSERT(flush_flags);
+
/* ARM SMMU v1 doesn't have flush by VMA and VMID */
return arm_smmu_iotlb_flush_all(d);
}
}
static int __must_check arm_smmu_map_page(struct domain *d, dfn_t dfn,
- mfn_t mfn, unsigned int flags)
+ mfn_t mfn, unsigned int flags,
+ unsigned int *flush_flags)
{
p2m_type_t t;
0, t);
}
-static int __must_check arm_smmu_unmap_page(struct domain *d, dfn_t dfn)
+static int __must_check arm_smmu_unmap_page(struct domain *d, dfn_t dfn,
+ unsigned int *flush_flags)
{
/*
* This function should only be used by gnttab code when the domain
if ( need_iommu_pt_sync(d) )
{
struct page_info *page;
- unsigned int i = 0;
+ unsigned int i = 0, flush_flags = 0;
int rc = 0;
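+        /* Accumulate flush flags over the loop; flush once afterwards */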
page_list_for_each ( page, &d->page_list )
== PGT_writable_page) )
mapping |= IOMMUF_writable;
- ret = hd->platform_ops->map_page(d, _dfn(dfn), _mfn(mfn),
- mapping);
+ ret = iommu_map(d, _dfn(dfn), _mfn(mfn), mapping, 0,
+ &flush_flags);
+
if ( !rc )
rc = ret;
process_pending_softirqs();
}
+        /* Use while/break to consume the __must_check result without a warning */
+ while ( iommu_iotlb_flush_all(d, flush_flags) )
+ break;
+
if ( rc )
printk(XENLOG_WARNING "d%d: IOMMU mapping failed: %d\n",
d->domain_id, rc);
arch_iommu_domain_destroy(d);
}
-int iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
- unsigned int page_order, unsigned int flags)
+int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+ unsigned int page_order, unsigned int flags,
+ unsigned int *flush_flags)
{
const struct domain_iommu *hd = dom_iommu(d);
unsigned long i;
for ( i = 0; i < (1ul << page_order); i++ )
{
- rc = hd->platform_ops->map_page(d, dfn_add(dfn, i),
- mfn_add(mfn, i), flags);
+ rc = hd->platform_ops->map_page(d, dfn_add(dfn, i), mfn_add(mfn, i),
+ flags, flush_flags);
if ( likely(!rc) )
continue;
while ( i-- )
/* if statement to satisfy __must_check */
- if ( hd->platform_ops->unmap_page(d, dfn_add(dfn, i)) )
+ if ( hd->platform_ops->unmap_page(d, dfn_add(dfn, i),
+ flush_flags) )
continue;
if ( !is_hardware_domain(d) )
return rc;
}
-int iommu_legacy_unmap(struct domain *d, dfn_t dfn, unsigned int page_order)
+int iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+ unsigned int page_order, unsigned int flags)
+{
+ unsigned int flush_flags = 0;
+ int rc = iommu_map(d, dfn, mfn, page_order, flags, &flush_flags);
+
+ if ( !this_cpu(iommu_dont_flush_iotlb) )
+ {
+ int err = iommu_iotlb_flush(d, dfn, (1u << page_order),
+ flush_flags);
+
+ if ( !rc )
+ rc = err;
+ }
+
+ return rc;
+}
+
+int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order,
+ unsigned int *flush_flags)
{
const struct domain_iommu *hd = dom_iommu(d);
unsigned long i;
for ( i = 0; i < (1ul << page_order); i++ )
{
- int err = hd->platform_ops->unmap_page(d, dfn_add(dfn, i));
+ int err = hd->platform_ops->unmap_page(d, dfn_add(dfn, i),
+ flush_flags);
if ( likely(!err) )
continue;
return rc;
}
+int iommu_legacy_unmap(struct domain *d, dfn_t dfn, unsigned int page_order)
+{
+ unsigned int flush_flags = 0;
+ int rc = iommu_unmap(d, dfn, page_order, &flush_flags);
+
+ if ( !this_cpu(iommu_dont_flush_iotlb) )
+ {
+ int err = iommu_iotlb_flush(d, dfn, (1u << page_order),
+ flush_flags);
+
+ if ( !rc )
+ rc = err;
+ }
+
+ return rc;
+}
+
int iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
unsigned int *flags)
{
cpumask_cycle(smp_processor_id(), &cpu_online_map));
}
-int iommu_iotlb_flush(struct domain *d, dfn_t dfn, unsigned int page_count)
+int iommu_iotlb_flush(struct domain *d, dfn_t dfn, unsigned int page_count,
+ unsigned int flush_flags)
{
const struct domain_iommu *hd = dom_iommu(d);
int rc;
if ( !iommu_enabled || !hd->platform_ops ||
- !hd->platform_ops->iotlb_flush || !page_count )
+ !hd->platform_ops->iotlb_flush || !page_count || !flush_flags )
return 0;
if ( dfn_eq(dfn, INVALID_DFN) )
return -EINVAL;
- rc = hd->platform_ops->iotlb_flush(d, dfn, page_count);
+ rc = hd->platform_ops->iotlb_flush(d, dfn, page_count, flush_flags);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
- "d%d: IOMMU IOTLB flush failed: %d, dfn %"PRI_dfn", page count %u\n",
- d->domain_id, rc, dfn_x(dfn), page_count);
+               "d%d: IOMMU IOTLB flush failed: %d, dfn %"PRI_dfn", page count %u, flags %x\n",
+ d->domain_id, rc, dfn_x(dfn), page_count, flush_flags);
if ( !is_hardware_domain(d) )
domain_crash(d);
return rc;
}
-int iommu_iotlb_flush_all(struct domain *d)
+int iommu_iotlb_flush_all(struct domain *d, unsigned int flush_flags)
{
const struct domain_iommu *hd = dom_iommu(d);
int rc;
- if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush_all )
+ if ( !iommu_enabled || !hd->platform_ops ||
+ !hd->platform_ops->iotlb_flush_all || !flush_flags )
return 0;
+ /*
+ * The operation does a full flush so we don't need to pass the
+ * flush_flags in.
+ */
rc = hd->platform_ops->iotlb_flush_all(d);
if ( unlikely(rc) )
{
static int __must_check iommu_flush_iotlb_pages(struct domain *d,
dfn_t dfn,
- unsigned int page_count)
+ unsigned int page_count,
+ unsigned int flush_flags)
{
ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
+ ASSERT(flush_flags);
- return iommu_flush_iotlb(d, dfn, 1, page_count);
+ return iommu_flush_iotlb(d, dfn, flush_flags & IOMMU_FLUSHF_modified,
+ page_count);
}
static int __must_check iommu_flush_iotlb_all(struct domain *d)
}
/* clear one page's page table */
-static int __must_check dma_pte_clear_one(struct domain *domain, u64 addr)
+static int __must_check dma_pte_clear_one(struct domain *domain, u64 addr,
+ unsigned int *flush_flags)
{
struct domain_iommu *hd = dom_iommu(domain);
struct dma_pte *page = NULL, *pte = NULL;
}
dma_clear_pte(*pte);
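+    /* An existing mapping has been removed, so a 'modified' flush is needed */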
+ *flush_flags |= IOMMU_FLUSHF_modified;
+
spin_unlock(&hd->arch.mapping_lock);
iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
- if ( !this_cpu(iommu_dont_flush_iotlb) )
- rc = iommu_flush_iotlb_pages(domain, daddr_to_dfn(addr), 1);
-
unmap_vtd_domain_page(page);
return rc;
spin_unlock(&hd->arch.mapping_lock);
}
-static int __must_check intel_iommu_map_page(struct domain *d,
- dfn_t dfn, mfn_t mfn,
- unsigned int flags)
+static int __must_check intel_iommu_map_page(struct domain *d, dfn_t dfn,
+ mfn_t mfn, unsigned int flags,
+ unsigned int *flush_flags)
{
struct domain_iommu *hd = dom_iommu(d);
struct dma_pte *page, *pte, old, new = {};
spin_unlock(&hd->arch.mapping_lock);
unmap_vtd_domain_page(page);
- if ( !this_cpu(iommu_dont_flush_iotlb) )
- rc = iommu_flush_iotlb(d, dfn, dma_pte_present(old), 1);
+ *flush_flags |= IOMMU_FLUSHF_added;
+ if ( dma_pte_present(old) )
+ *flush_flags |= IOMMU_FLUSHF_modified;
return rc;
}
-static int __must_check intel_iommu_unmap_page(struct domain *d,
- dfn_t dfn)
+static int __must_check intel_iommu_unmap_page(struct domain *d, dfn_t dfn,
+ unsigned int *flush_flags)
{
/* Do nothing if VT-d shares EPT page table */
if ( iommu_use_hap_pt(d) )
if ( iommu_hwdom_passthrough && is_hardware_domain(d) )
return 0;
- return dma_pte_clear_one(d, dfn_to_daddr(dfn));
+ return dma_pte_clear_one(d, dfn_to_daddr(dfn), flush_flags);
}
static int intel_iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
int arch_iommu_populate_page_table(struct domain *d)
{
- const struct domain_iommu *hd = dom_iommu(d);
struct page_info *page;
int rc = 0, n = 0;
- this_cpu(iommu_dont_flush_iotlb) = 1;
spin_lock(&d->page_alloc_lock);
if ( unlikely(d->is_dying) )
{
unsigned long mfn = mfn_x(page_to_mfn(page));
unsigned long gfn = mfn_to_gmfn(d, mfn);
+ unsigned int flush_flags = 0;
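+        /*
+         * The flags returned for this page are not consumed here; a full
+         * flush is issued after the (pre-emptible) loop completes.
+         */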
if ( gfn != gfn_x(INVALID_GFN) )
{
ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
BUG_ON(SHARED_M2P(gfn));
- rc = hd->platform_ops->map_page(d, _dfn(gfn), _mfn(mfn),
- IOMMUF_readable |
- IOMMUF_writable);
+ rc = iommu_map(d, _dfn(gfn), _mfn(mfn), PAGE_ORDER_4K,
+ IOMMUF_readable | IOMMUF_writable,
+ &flush_flags);
}
if ( rc )
{
}
spin_unlock(&d->page_alloc_lock);
- this_cpu(iommu_dont_flush_iotlb) = 0;
if ( !rc )
- rc = iommu_iotlb_flush_all(d);
+ /*
+         * flush_flags are not tracked across hypercall pre-emption, so
+ * assume a full flush is necessary.
+ */
+ rc = iommu_iotlb_flush_all(
+ d, IOMMU_FLUSHF_added | IOMMU_FLUSHF_modified);
if ( rc && rc != -ERESTART )
iommu_teardown(d);
void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
{
unsigned long i, top, max_pfn;
+ unsigned int flush_flags = 0;
BUG_ON(!is_hardware_domain(d));
if ( paging_mode_translate(d) )
rc = set_identity_p2m_entry(d, pfn, p2m_access_rw, 0);
else
- rc = iommu_legacy_map(d, _dfn(pfn), _mfn(pfn), PAGE_ORDER_4K,
- IOMMUF_readable | IOMMUF_writable);
+ rc = iommu_map(d, _dfn(pfn), _mfn(pfn), PAGE_ORDER_4K,
+ IOMMUF_readable | IOMMUF_writable, &flush_flags);
+
if ( rc )
printk(XENLOG_WARNING " d%d: IOMMU mapping failed: %d\n",
d->domain_id, rc);
if (!(i & 0xfffff))
process_pending_softirqs();
}
+
+    /* Use if() to consume the __must_check result and avoid a warning */
+ if ( iommu_iotlb_flush_all(d, flush_flags) )
+ return;
}
/*
/* mapping functions */
int __must_check amd_iommu_map_page(struct domain *d, dfn_t dfn,
- mfn_t mfn, unsigned int flags);
-int __must_check amd_iommu_unmap_page(struct domain *d, dfn_t dfn);
+ mfn_t mfn, unsigned int flags,
+ unsigned int *flush_flags);
+int __must_check amd_iommu_unmap_page(struct domain *d, dfn_t dfn,
+ unsigned int *flush_flags);
uint64_t amd_iommu_get_address_from_pte(void *entry);
int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
paddr_t phys_addr, unsigned long size,
int iw, int ir);
int __must_check amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
- unsigned int page_count);
+ unsigned int page_count,
+ unsigned int flush_flags);
int __must_check amd_iommu_flush_iotlb_all(struct domain *d);
/* Share p2m table with iommu */
#define _IOMMUF_writable 1
#define IOMMUF_writable (1u<<_IOMMUF_writable)
+/*
+ * flush_flags:
+ *
+ * IOMMU_FLUSHF_added -> A new 'present' PTE has been inserted.
+ * IOMMU_FLUSHF_modified -> An existing 'present' PTE has been modified
+ * (whether the new PTE value is 'present' or not).
+ *
+ * These flags are passed back from map/unmap operations and passed into
+ * flush operations.
+ */
+enum
+{
+ _IOMMU_FLUSHF_added,
+ _IOMMU_FLUSHF_modified,
+};
+#define IOMMU_FLUSHF_added (1u << _IOMMU_FLUSHF_added)
+#define IOMMU_FLUSHF_modified (1u << _IOMMU_FLUSHF_modified)
+
+int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+ unsigned int page_order, unsigned int flags,
+ unsigned int *flush_flags);
+int __must_check iommu_unmap(struct domain *d, dfn_t dfn,
+ unsigned int page_order,
+ unsigned int *flush_flags);
+
int __must_check iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
unsigned int page_order,
unsigned int flags);
int __must_check iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
unsigned int *flags);
+int __must_check iommu_iotlb_flush(struct domain *d, dfn_t dfn,
+ unsigned int page_count,
+ unsigned int flush_flags);
+int __must_check iommu_iotlb_flush_all(struct domain *d,
+ unsigned int flush_flags);
+
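As a non-normative illustration of the calling pattern described in the comment above (the helper below and its arguments are hypothetical, not part of this patch), a caller accumulates the flags returned by a batch of map operations and issues a single flush at the end:

    /* Hypothetical caller: map a batch of 4k frames, then flush once. */
    static int example_map_batch(struct domain *d, const dfn_t *dfns,
                                 const mfn_t *mfns, unsigned int count)
    {
        unsigned int i, flush_flags = 0;
        int rc = 0;

        for ( i = 0; !rc && i < count; i++ )
            rc = iommu_map(d, dfns[i], mfns[i], PAGE_ORDER_4K,
                           IOMMUF_readable | IOMMUF_writable, &flush_flags);

        if ( flush_flags )
        {
            /* A single flush covers every PTE added or modified above. */
            int err = iommu_iotlb_flush_all(d, flush_flags);

            if ( !rc )
                rc = err;
        }

        return rc;
    }

Whether a flush can be elided altogether (e.g. when only IOMMU_FLUSHF_added is set) is left to the individual implementation, as the AMD IOTLB code above shows.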
enum iommu_feature
{
IOMMU_FEAT_COHERENT_WALK,
* other by the caller in order to have meaningful results.
*/
int __must_check (*map_page)(struct domain *d, dfn_t dfn, mfn_t mfn,
- unsigned int flags);
- int __must_check (*unmap_page)(struct domain *d, dfn_t dfn);
+ unsigned int flags,
+ unsigned int *flush_flags);
+ int __must_check (*unmap_page)(struct domain *d, dfn_t dfn,
+ unsigned int *flush_flags);
int __must_check (*lookup_page)(struct domain *d, dfn_t dfn, mfn_t *mfn,
unsigned int *flags);
void (*share_p2m)(struct domain *d);
void (*crash_shutdown)(void);
int __must_check (*iotlb_flush)(struct domain *d, dfn_t dfn,
- unsigned int page_count);
+ unsigned int page_count,
+ unsigned int flush_flags);
int __must_check (*iotlb_flush_all)(struct domain *d);
int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
void (*dump_p2m_table)(struct domain *d);
int iommu_do_domctl(struct xen_domctl *, struct domain *d,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
-int __must_check iommu_iotlb_flush(struct domain *d, dfn_t dfn,
- unsigned int page_count);
-int __must_check iommu_iotlb_flush_all(struct domain *d);
-
void iommu_dev_iotlb_flush_timeout(struct domain *d, struct pci_dev *pdev);
/*