mfn_t mfn = page_to_mfn(page);
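+ /* Page-type changes are tracked per page, hence order-4k updates. */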
if ( (x & PGT_type_mask) == PGT_writable_page )
- iommu_ret = iommu_unmap_page(d, _dfn(mfn_x(mfn)));
+ iommu_ret = iommu_unmap(d, _dfn(mfn_x(mfn)),
+ PAGE_ORDER_4K);
else if ( type == PGT_writable_page )
- iommu_ret = iommu_map_page(d, _dfn(mfn_x(mfn)), mfn,
- IOMMUF_readable |
- IOMMUF_writable);
+ iommu_ret = iommu_map(d, _dfn(mfn_x(mfn)), mfn,
+ PAGE_ORDER_4K,
+ IOMMUF_readable | IOMMUF_writable);
}
}
if ( iommu_use_hap_pt(d) )
rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
else if ( need_iommu_pt_sync(d) )
- {
- dfn_t dfn = _dfn(gfn);
-
- if ( iommu_flags )
- for ( i = 0; i < (1 << order); i++ )
- {
- rc = iommu_map_page(d, dfn_add(dfn, i),
- mfn_add(mfn, i), iommu_flags);
- if ( unlikely(rc) )
- {
- while ( i-- )
- /* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(p2m->domain,
- dfn_add(dfn, i)) )
- continue;
-
- break;
- }
- }
- else
- for ( i = 0; i < (1 << order); i++ )
- {
- ret = iommu_unmap_page(d, dfn_add(dfn, i));
- if ( !rc )
- rc = ret;
- }
- }
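+ /*
+  * iommu_map() / iommu_unmap() now walk the whole order themselves,
+  * including unwinding on error, so the per-page loop is gone.
+  */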
+ rc = iommu_flags ?
+ iommu_map(d, _dfn(gfn), mfn, order, iommu_flags) :
+ iommu_unmap(d, _dfn(gfn), order);
}
unmap_domain_page(table);
unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma,
int sve)
{
+ struct domain *d = p2m->domain;
/* XXX -- this might be able to be faster iff current->domain == d */
void *table;
unsigned long gfn = gfn_x(gfn_);
- unsigned long i, gfn_remainder = gfn;
+ unsigned long gfn_remainder = gfn;
l1_pgentry_t *p2m_entry, entry_content;
/* Intermediate table to free if we're replacing it with a superpage. */
l1_pgentry_t intermediate_entry = l1e_empty();
t.gfn = gfn;
t.mfn = mfn_x(mfn);
t.p2mt = p2mt;
- t.d = p2m->domain->domain_id;
+ t.d = d->domain_id;
t.order = page_order;
__trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
{
ASSERT(rc == 0);
- if ( iommu_use_hap_pt(p2m->domain) )
- {
- if ( iommu_old_flags )
- amd_iommu_flush_pages(p2m->domain, gfn, page_order);
- }
- else if ( need_iommu_pt_sync(p2m->domain) )
- {
- dfn_t dfn = _dfn(gfn);
-
- if ( iommu_pte_flags )
- for ( i = 0; i < (1UL << page_order); i++ )
- {
- rc = iommu_map_page(p2m->domain, dfn_add(dfn, i),
- mfn_add(mfn, i), iommu_pte_flags);
- if ( unlikely(rc) )
- {
- while ( i-- )
- /* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(p2m->domain,
- dfn_add(dfn, i)) )
- continue;
-
- break;
- }
- }
- else
- for ( i = 0; i < (1UL << page_order); i++ )
- {
- int ret = iommu_unmap_page(p2m->domain,
- dfn_add(dfn, i));
-
- if ( !rc )
- rc = ret;
- }
- }
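+ /*
+  * Separate IOMMU page tables need an explicit map/unmap; shared (HAP)
+  * page tables only need flushing, and only if there were old flags.
+  */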
+ if ( need_iommu_pt_sync(d) )
+ rc = iommu_pte_flags ?
+ iommu_map(d, _dfn(gfn), mfn, page_order, iommu_pte_flags) :
+ iommu_unmap(d, _dfn(gfn), page_order);
+ else if ( iommu_use_hap_pt(d) && iommu_old_flags )
+ amd_iommu_flush_pages(d, gfn, page_order);
}
/*
p2m_access_t a;
if ( !paging_mode_translate(p2m->domain) )
- {
- int rc = 0;
-
- if ( need_iommu_pt_sync(p2m->domain) )
- {
- dfn_t dfn = _dfn(mfn);
-
- for ( i = 0; i < (1 << page_order); i++ )
- {
- int ret = iommu_unmap_page(p2m->domain, dfn_add(dfn, i));
-
- if ( !rc )
- rc = ret;
- }
- }
-
- return rc;
- }
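+ /* Not translated: IOMMU mappings are indexed by mfn, so unmap at mfn. */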
+ return need_iommu_pt_sync(p2m->domain) ?
+ iommu_unmap(p2m->domain, _dfn(mfn), page_order) : 0;
ASSERT(gfn_locked_by_me(p2m, gfn));
P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn_l, mfn);
int rc = 0;
if ( !paging_mode_translate(d) )
- {
- if ( need_iommu_pt_sync(d) && t == p2m_ram_rw )
- {
- dfn_t dfn = _dfn(mfn_x(mfn));
-
- for ( i = 0; i < (1 << page_order); i++ )
- {
- rc = iommu_map_page(d, dfn_add(dfn, i), mfn_add(mfn, i),
- IOMMUF_readable|IOMMUF_writable);
- if ( rc != 0 )
- {
- while ( i-- > 0 )
- /* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(d, dfn_add(dfn, i)) )
- continue;
-
- return rc;
- }
- }
- }
- return 0;
- }
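+ /* Not translated: map r/w RAM 1:1 (dfn == mfn); nothing else is put in the IOMMU. */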
+ return (need_iommu_pt_sync(d) && t == p2m_ram_rw) ?
+ iommu_map(d, _dfn(mfn_x(mfn)), mfn, page_order,
+ IOMMUF_readable | IOMMUF_writable) : 0;
/* foreign pages are added thru p2m_add_foreign */
if ( p2m_is_foreign(t) )
{
if ( !need_iommu_pt_sync(d) )
return 0;
- return iommu_map_page(d, _dfn(gfn_l), _mfn(gfn_l),
- IOMMUF_readable | IOMMUF_writable);
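+ /* Identity mapping: dfn and mfn both derive from gfn_l. */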
+ return iommu_map(d, _dfn(gfn_l), _mfn(gfn_l), PAGE_ORDER_4K,
+ IOMMUF_readable | IOMMUF_writable);
}
gfn_lock(p2m, gfn, 0);
{
if ( !need_iommu_pt_sync(d) )
return 0;
- return iommu_unmap_page(d, _dfn(gfn_l));
+ return iommu_unmap(d, _dfn(gfn_l), PAGE_ORDER_4K);
}
gfn_lock(p2m, gfn, 0);
!need_iommu_pt_sync(hardware_domain) )
{
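+ /* Identity-map the hot-added range for the hardware domain, 4k at a time. */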
for ( i = spfn; i < epfn; i++ )
- if ( iommu_map_page(hardware_domain, _dfn(i), _mfn(i),
- IOMMUF_readable | IOMMUF_writable) )
+ if ( iommu_map(hardware_domain, _dfn(i), _mfn(i),
+ PAGE_ORDER_4K,
+ IOMMUF_readable | IOMMUF_writable) )
break;
if ( i != epfn )
{
while (i-- > old_max)
/* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(hardware_domain, _dfn(i)) )
+ if ( iommu_unmap(hardware_domain, _dfn(i), PAGE_ORDER_4K) )
continue;
goto destroy_m2p;
!(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
{
if ( !(kind & MAPKIND_WRITE) )
- err = iommu_map_page(ld, _dfn(mfn_x(mfn)), mfn,
- IOMMUF_readable | IOMMUF_writable);
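+ /* Grant mappings are always single pages, hence order 0. */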
+ err = iommu_map(ld, _dfn(mfn_x(mfn)), mfn, 0,
+ IOMMUF_readable | IOMMUF_writable);
}
else if ( act_pin && !old_pin )
{
if ( !kind )
- err = iommu_map_page(ld, _dfn(mfn_x(mfn)), mfn,
- IOMMUF_readable);
+ err = iommu_map(ld, _dfn(mfn_x(mfn)), mfn, 0,
+ IOMMUF_readable);
}
if ( err )
{
kind = mapkind(lgt, rd, op->mfn);
if ( !kind )
- err = iommu_unmap_page(ld, _dfn(mfn_x(op->mfn)));
+ err = iommu_unmap(ld, _dfn(mfn_x(op->mfn)), 0);
else if ( !(kind & MAPKIND_WRITE) )
- err = iommu_map_page(ld, _dfn(mfn_x(op->mfn)), op->mfn,
- IOMMUF_readable);
+ err = iommu_map(ld, _dfn(mfn_x(op->mfn)), op->mfn, 0,
+ IOMMUF_readable);
double_gt_unlock(lgt, rgt);
arch_iommu_domain_destroy(d);
}
-int iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
- unsigned int flags)
+int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+ unsigned int page_order, unsigned int flags)
{
const struct domain_iommu *hd = dom_iommu(d);
- int rc;
+ unsigned long i;
+ int rc = 0;
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- rc = hd->platform_ops->map_page(d, dfn, mfn, flags);
- if ( unlikely(rc) )
+ ASSERT(IS_ALIGNED(dfn_x(dfn), (1ul << page_order)));
+ ASSERT(IS_ALIGNED(mfn_x(mfn), (1ul << page_order)));
+
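+ /* The per-IOMMU hook still operates on a single page, so walk the range page by page. */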
+ for ( i = 0; i < (1ul << page_order); i++ )
{
+ rc = hd->platform_ops->map_page(d, dfn_add(dfn, i),
+ mfn_add(mfn, i), flags);
+
+ if ( likely(!rc) )
+ continue;
+
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
"d%d: IOMMU mapping dfn %"PRI_dfn" to mfn %"PRI_mfn" failed: %d\n",
- d->domain_id, dfn_x(dfn), mfn_x(mfn), rc);
+ d->domain_id, dfn_x(dfn_add(dfn, i)),
+ mfn_x(mfn_add(mfn, i)), rc);
+
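+ /* Unwind the mappings established so far rather than leave a partial range. */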
+ while ( i-- )
+ /* If statement to satisfy __must_check. */
+ if ( hd->platform_ops->unmap_page(d, dfn_add(dfn, i)) )
+ continue;
if ( !is_hardware_domain(d) )
domain_crash(d);
+
+ break;
}
return rc;
}
-int iommu_unmap_page(struct domain *d, dfn_t dfn)
+int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order)
{
const struct domain_iommu *hd = dom_iommu(d);
- int rc;
+ unsigned long i;
+ int rc = 0;
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- rc = hd->platform_ops->unmap_page(d, dfn);
- if ( unlikely(rc) )
+ ASSERT(IS_ALIGNED(dfn_x(dfn), (1ul << page_order)));
+
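+ /*
+  * For the hardware domain, keep unmapping the rest of the range on
+  * error (recording the first error); other domains are crashed.
+  */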
+ for ( i = 0; i < (1ul << page_order); i++ )
{
+ int err = hd->platform_ops->unmap_page(d, dfn_add(dfn, i));
+
+ if ( likely(!err) )
+ continue;
+
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
"d%d: IOMMU unmapping dfn %"PRI_dfn" failed: %d\n",
- d->domain_id, dfn_x(dfn), rc);
+ d->domain_id, dfn_x(dfn_add(dfn, i)), err);
+
+ if ( !rc )
+ rc = err;
if ( !is_hardware_domain(d) )
+ {
domain_crash(d);
+ break;
+ }
}
return rc;
if ( paging_mode_translate(d) )
rc = set_identity_p2m_entry(d, pfn, p2m_access_rw, 0);
else
- rc = iommu_map_page(d, _dfn(pfn), _mfn(pfn),
- IOMMUF_readable | IOMMUF_writable);
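+ /* No P2M to modify: map the page 1:1 in the IOMMU directly. */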
+ rc = iommu_map(d, _dfn(pfn), _mfn(pfn), PAGE_ORDER_4K,
+ IOMMUF_readable | IOMMUF_writable);
if ( rc )
printk(XENLOG_WARNING " d%d: IOMMU mapping failed: %d\n",
d->domain_id, rc);
#define IOMMUF_readable (1u<<_IOMMUF_readable)
#define _IOMMUF_writable 1
#define IOMMUF_writable (1u<<_IOMMUF_writable)
-int __must_check iommu_map_page(struct domain *d, dfn_t dfn,
- mfn_t mfn, unsigned int flags);
-int __must_check iommu_unmap_page(struct domain *d, dfn_t dfn);
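+/* dfn (and, for iommu_map(), mfn) must be aligned to 2^page_order. */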
+int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+ unsigned int page_order, unsigned int flags);
+int __must_check iommu_unmap(struct domain *d, dfn_t dfn,
+ unsigned int page_order);
int __must_check iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
unsigned int *flags);
extern struct page_list_head iommu_pt_cleanup_list;
#endif /* _IOMMU_H_ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */