if ( d && unlikely(need_iommu_pt_sync(d)) && is_pv_domain(d) )
{
- int rc2 = iommu_legacy_unmap(d, _dfn(mfn), PAGE_ORDER_4K);
+ int rc2 = iommu_legacy_unmap(d, _dfn(mfn), 1ul << PAGE_ORDER_4K);
if ( !rc )
rc = rc2;
mfn_t mfn = page_to_mfn(page);
if ( (x & PGT_type_mask) == PGT_writable_page )
- rc = iommu_legacy_unmap(d, _dfn(mfn_x(mfn)), PAGE_ORDER_4K);
+ rc = iommu_legacy_unmap(d, _dfn(mfn_x(mfn)),
+ 1ul << PAGE_ORDER_4K);
else
- rc = iommu_legacy_map(d, _dfn(mfn_x(mfn)), mfn, PAGE_ORDER_4K,
+ rc = iommu_legacy_map(d, _dfn(mfn_x(mfn)), mfn,
+ 1ul << PAGE_ORDER_4K,
IOMMUF_readable | IOMMUF_writable);
if ( unlikely(rc) )
need_modify_vtd_table )
{
if ( iommu_use_hap_pt(d) )
- rc = iommu_iotlb_flush(d, _dfn(gfn), (1u << order),
+ rc = iommu_iotlb_flush(d, _dfn(gfn), 1ul << order,
(iommu_flags ? IOMMU_FLUSHF_added : 0) |
(vtd_pte_present ? IOMMU_FLUSHF_modified
: 0));
else if ( need_iommu_pt_sync(d) )
rc = iommu_flags ?
- iommu_legacy_map(d, _dfn(gfn), mfn, order, iommu_flags) :
- iommu_legacy_unmap(d, _dfn(gfn), order);
+ iommu_legacy_map(d, _dfn(gfn), mfn, 1ul << order, iommu_flags) :
+ iommu_legacy_unmap(d, _dfn(gfn), 1ul << order);
}
unmap_domain_page(table);
if ( need_iommu_pt_sync(p2m->domain) &&
(iommu_old_flags != iommu_pte_flags || old_mfn != mfn_x(mfn)) )
rc = iommu_pte_flags
- ? iommu_legacy_map(d, _dfn(gfn), mfn, page_order,
+ ? iommu_legacy_map(d, _dfn(gfn), mfn, 1ul << page_order,
iommu_pte_flags)
- : iommu_legacy_unmap(d, _dfn(gfn), page_order);
+ : iommu_legacy_unmap(d, _dfn(gfn), 1ul << page_order);
/*
* Free old intermediate tables if necessary. This has to be the
{
if ( !is_iommu_enabled(d) )
return 0;
- return iommu_legacy_map(d, _dfn(gfn_l), _mfn(gfn_l), PAGE_ORDER_4K,
+ return iommu_legacy_map(d, _dfn(gfn_l), _mfn(gfn_l),
+ 1ul << PAGE_ORDER_4K,
IOMMUF_readable | IOMMUF_writable);
}
{
if ( !is_iommu_enabled(d) )
return 0;
- return iommu_legacy_unmap(d, _dfn(gfn_l), PAGE_ORDER_4K);
+ return iommu_legacy_unmap(d, _dfn(gfn_l), 1ul << PAGE_ORDER_4K);
}
gfn_lock(p2m, gfn, 0);
{
for ( i = spfn; i < epfn; i++ )
if ( iommu_legacy_map(hardware_domain, _dfn(i), _mfn(i),
- PAGE_ORDER_4K,
+ 1ul << PAGE_ORDER_4K,
IOMMUF_readable | IOMMUF_writable) )
break;
if ( i != epfn )
while (i-- > old_max)
/* If statement to satisfy __must_check. */
if ( iommu_legacy_unmap(hardware_domain, _dfn(i),
- PAGE_ORDER_4K) )
+ 1ul << PAGE_ORDER_4K) )
continue;
goto destroy_m2p;
kind = IOMMUF_readable;
else
kind = 0;
- if ( kind && iommu_legacy_map(ld, _dfn(mfn_x(mfn)), mfn, 0, kind) )
+ if ( kind && iommu_legacy_map(ld, _dfn(mfn_x(mfn)), mfn, 1, kind) )
{
double_gt_unlock(lgt, rgt);
rc = GNTST_general_error;
kind = mapkind(lgt, rd, op->mfn);
if ( !kind )
- err = iommu_legacy_unmap(ld, _dfn(mfn_x(op->mfn)), 0);
+ err = iommu_legacy_unmap(ld, _dfn(mfn_x(op->mfn)), 1);
else if ( !(kind & MAPKIND_WRITE) )
- err = iommu_legacy_map(ld, _dfn(mfn_x(op->mfn)), op->mfn, 0,
+ err = iommu_legacy_map(ld, _dfn(mfn_x(op->mfn)), op->mfn, 1,
IOMMUF_readable);
double_gt_unlock(lgt, rgt);
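The grant-table call sites above only ever deal with a single 4k page, so the former order argument of 0 becomes a literal count of 1. A minimal sketch of the order-to-count conversion applied throughout (illustrative names, not part of the change itself):

    /* Illustration only: the new count is the old order shifted into a size. */
    unsigned long page_count = 1ul << page_order;    /* order 0 -> count 1 */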
paddr_t phys_addr, unsigned long size,
int iw, int ir);
int __must_check amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
- unsigned int page_count,
+ unsigned long page_count,
unsigned int flush_flags);
int __must_check amd_iommu_flush_iotlb_all(struct domain *d);
return 0;
}
-static unsigned long flush_count(unsigned long dfn, unsigned int page_count,
+static unsigned long flush_count(unsigned long dfn, unsigned long page_count,
unsigned int order)
{
unsigned long start = dfn >> order;
}
int amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
- unsigned int page_count,
+ unsigned long page_count,
unsigned int flush_flags)
{
unsigned long dfn_l = dfn_x(dfn);
}
static int __must_check ipmmu_iotlb_flush(struct domain *d, dfn_t dfn,
- unsigned int page_count,
+ unsigned long page_count,
unsigned int flush_flags)
{
ASSERT(flush_flags);
}
static int __must_check arm_smmu_iotlb_flush(struct domain *d, dfn_t dfn,
- unsigned int page_count,
+ unsigned long page_count,
unsigned int flush_flags)
{
ASSERT(flush_flags);
}
int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
- unsigned int page_order, unsigned int flags,
+ unsigned long page_count, unsigned int flags,
unsigned int *flush_flags)
{
const struct domain_iommu *hd = dom_iommu(d);
if ( !is_iommu_enabled(d) )
return 0;
- ASSERT(IS_ALIGNED(dfn_x(dfn), (1ul << page_order)));
- ASSERT(IS_ALIGNED(mfn_x(mfn), (1ul << page_order)));
-
- for ( i = 0; i < (1ul << page_order); i++ )
+ for ( i = 0; i < page_count; i++ )
{
rc = iommu_call(hd->platform_ops, map_page, d, dfn_add(dfn, i),
mfn_add(mfn, i), flags, flush_flags);
* Something went wrong so, if we were dealing with more than a single
* page, flush everything and clear flush flags.
*/
- if ( page_order && unlikely(rc) && !iommu_iotlb_flush_all(d, *flush_flags) )
+ if ( page_count > 1 && unlikely(rc) &&
+ !iommu_iotlb_flush_all(d, *flush_flags) )
*flush_flags = 0;
return rc;
}
int iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
- unsigned int page_order, unsigned int flags)
+ unsigned long page_count, unsigned int flags)
{
unsigned int flush_flags = 0;
- int rc = iommu_map(d, dfn, mfn, page_order, flags, &flush_flags);
+ int rc = iommu_map(d, dfn, mfn, page_count, flags, &flush_flags);
if ( !this_cpu(iommu_dont_flush_iotlb) && !rc )
- rc = iommu_iotlb_flush(d, dfn, (1u << page_order), flush_flags);
+ rc = iommu_iotlb_flush(d, dfn, page_count, flush_flags);
return rc;
}
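For call sites that previously passed a non-zero order, the conversion is mechanical: the order is shifted into a count at the caller. A minimal before/after sketch for a hypothetical caller (PAGE_ORDER_2M chosen purely for illustration):

    /* Before: the region size was expressed as an order. */
    rc = iommu_legacy_map(d, dfn, mfn, PAGE_ORDER_2M,
                          IOMMUF_readable | IOMMUF_writable);

    /* After: the same region is expressed as a page count. */
    rc = iommu_legacy_map(d, dfn, mfn, 1ul << PAGE_ORDER_2M,
                          IOMMUF_readable | IOMMUF_writable);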
-int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order,
+int iommu_unmap(struct domain *d, dfn_t dfn, unsigned long page_count,
unsigned int *flush_flags)
{
const struct domain_iommu *hd = dom_iommu(d);
if ( !is_iommu_enabled(d) )
return 0;
- ASSERT(IS_ALIGNED(dfn_x(dfn), (1ul << page_order)));
-
- for ( i = 0; i < (1ul << page_order); i++ )
+ for ( i = 0; i < page_count; i++ )
{
int err = iommu_call(hd->platform_ops, unmap_page, d, dfn_add(dfn, i),
flush_flags);
* Something went wrong so, if we were dealing with more than a single
* page, flush everything and clear flush flags.
*/
- if ( page_order && unlikely(rc) && !iommu_iotlb_flush_all(d, *flush_flags) )
+ if ( page_count > 1 && unlikely(rc) &&
+ !iommu_iotlb_flush_all(d, *flush_flags) )
*flush_flags = 0;
return rc;
}
-int iommu_legacy_unmap(struct domain *d, dfn_t dfn, unsigned int page_order)
+int iommu_legacy_unmap(struct domain *d, dfn_t dfn, unsigned long page_count)
{
unsigned int flush_flags = 0;
- int rc = iommu_unmap(d, dfn, page_order, &flush_flags);
+ int rc = iommu_unmap(d, dfn, page_count, &flush_flags);
if ( !this_cpu(iommu_dont_flush_iotlb) && !rc )
- rc = iommu_iotlb_flush(d, dfn, (1u << page_order), flush_flags);
+ rc = iommu_iotlb_flush(d, dfn, page_count, flush_flags);
return rc;
}
return iommu_call(hd->platform_ops, lookup_page, d, dfn, mfn, flags);
}
-int iommu_iotlb_flush(struct domain *d, dfn_t dfn, unsigned int page_count,
+int iommu_iotlb_flush(struct domain *d, dfn_t dfn, unsigned long page_count,
unsigned int flush_flags)
{
const struct domain_iommu *hd = dom_iommu(d);
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
- "d%d: IOMMU IOTLB flush failed: %d, dfn %"PRI_dfn", page count %u flags %x\n",
+ "d%d: IOMMU IOTLB flush failed: %d, dfn %"PRI_dfn", page count %lu flags %x\n",
d->domain_id, rc, dfn_x(dfn), page_count, flush_flags);
if ( !is_hardware_domain(d) )
static int __must_check iommu_flush_iotlb(struct domain *d, dfn_t dfn,
bool_t dma_old_pte_present,
- unsigned int page_count)
+ unsigned long page_count)
{
struct domain_iommu *hd = dom_iommu(d);
struct acpi_drhd_unit *drhd;
static int __must_check iommu_flush_iotlb_pages(struct domain *d,
dfn_t dfn,
- unsigned int page_count,
+ unsigned long page_count,
unsigned int flush_flags)
{
ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
else if ( paging_mode_translate(d) )
rc = set_identity_p2m_entry(d, pfn, p2m_access_rw, 0);
else
- rc = iommu_map(d, _dfn(pfn), _mfn(pfn), PAGE_ORDER_4K,
+ rc = iommu_map(d, _dfn(pfn), _mfn(pfn), 1ul << PAGE_ORDER_4K,
IOMMUF_readable | IOMMUF_writable, &flush_flags);
if ( rc )
#define IOMMU_FLUSHF_modified (1u << _IOMMU_FLUSHF_modified)
int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
- unsigned int page_order, unsigned int flags,
+ unsigned long page_count, unsigned int flags,
unsigned int *flush_flags);
int __must_check iommu_unmap(struct domain *d, dfn_t dfn,
- unsigned int page_order,
+ unsigned long page_count,
unsigned int *flush_flags);
int __must_check iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
- unsigned int page_order,
+ unsigned long page_count,
unsigned int flags);
int __must_check iommu_legacy_unmap(struct domain *d, dfn_t dfn,
- unsigned int page_order);
+ unsigned long page_count);
int __must_check iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
unsigned int *flags);
int __must_check iommu_iotlb_flush(struct domain *d, dfn_t dfn,
- unsigned int page_count,
+ unsigned long page_count,
unsigned int flush_flags);
int __must_check iommu_iotlb_flush_all(struct domain *d,
unsigned int flush_flags);
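Non-legacy callers use the same count-based convention but accumulate flush flags and issue a single flush afterwards, mirroring what iommu_legacy_map()/iommu_legacy_unmap() do internally. A minimal sketch against the prototypes above (error handling elided; d, dfn, mfn and nr_pages are assumed caller-side variables):

    unsigned int flush_flags = 0;
    int rc = iommu_map(d, dfn, mfn, nr_pages,
                       IOMMUF_readable | IOMMUF_writable, &flush_flags);

    if ( !rc )
        rc = iommu_iotlb_flush(d, dfn, nr_pages, flush_flags);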
void (*share_p2m)(struct domain *d);
void (*crash_shutdown)(void);
int __must_check (*iotlb_flush)(struct domain *d, dfn_t dfn,
- unsigned int page_count,
+ unsigned long page_count,
unsigned int flush_flags);
int __must_check (*iotlb_flush_all)(struct domain *d);
int (*get_reserved_device_memory)(iommu_grdm_t *, void *);