int preemptible)
{
unsigned long nx, x, y = page->u.inuse.type_info;
- int rc = 0;
+ int rc = 0, iommu_ret = 0;
ASSERT(!(type & ~(PGT_type_mask | PGT_pae_xen_l2)));
if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
{
if ( (x & PGT_type_mask) == PGT_writable_page )
- iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
+ iommu_ret = iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
else if ( type == PGT_writable_page )
- iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
- page_to_mfn(page),
- IOMMUF_readable|IOMMUF_writable);
+ iommu_ret = iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
+ page_to_mfn(page),
+ IOMMUF_readable|IOMMUF_writable);
}
}
if ( (x & PGT_partial) && !(nx & PGT_partial) )
put_page(page);
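+
+ /* Don't let the IOMMU result overwrite an earlier error. */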
+ if ( !rc )
+ rc = iommu_ret;
+
return rc;
}
unsigned long gfn_remainder = gfn;
unsigned int i, target = order / EPT_TABLE_ORDER;
int ret, rc = 0;
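+ /* Set once the new EPT entry has actually been written. */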
+ bool_t entry_written = 0;
bool_t direct_mmio = (p2mt == p2m_mmio_direct);
uint8_t ipat = 0;
bool_t need_modify_vtd_table = 1;
rc = atomic_write_ept_entry(ept_entry, new_entry, target);
if ( unlikely(rc) )
old_entry.epte = 0;
- else if ( p2mt != p2m_invalid &&
- (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
- /* Track the highest gfn for which we have ever had a valid mapping */
- p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
+ else
+ {
+ entry_written = 1;
+
+ if ( p2mt != p2m_invalid &&
+ (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
+ /* Track the highest gfn for which we have ever had a valid mapping */
+ p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
+ }
out:
if ( needs_sync )
{
if ( iommu_flags )
for ( i = 0; i < (1 << order); i++ )
- iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+ {
+ rc = iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+ if ( unlikely(rc) )
+ {
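+ /* Roll back the mappings established so far. */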
+ while ( i-- )
+ /* If statement to satisfy __must_check. */
+ if ( iommu_unmap_page(p2m->domain, gfn + i) )
+ continue;
+
+ break;
+ }
+ }
else
for ( i = 0; i < (1 << order); i++ )
- iommu_unmap_page(d, gfn + i);
+ {
+ ret = iommu_unmap_page(d, gfn + i);
+ if ( !rc )
+ rc = ret;
+ }
}
}
if ( is_epte_present(&old_entry) )
ept_free_entry(p2m, &old_entry, target);
- if ( rc == 0 && p2m_is_hostp2m(p2m) )
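+ /*
+ * Propagate the change to any altp2ms whenever the entry was actually
+ * written, even if the IOMMU update above failed.
+ */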
+ if ( entry_written && p2m_is_hostp2m(p2m) )
p2m_altp2m_propagate_change(d, _gfn(gfn), mfn, order, p2mt, p2ma);
return rc;
if ( iommu_enabled && need_iommu(p2m->domain) &&
(iommu_old_flags != iommu_pte_flags || old_mfn != mfn_x(mfn)) )
{
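+ /* rc is still zero here, so it can be reused for the IOMMU updates below. */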
+ ASSERT(rc == 0);
+
if ( iommu_use_hap_pt(p2m->domain) )
{
if ( iommu_old_flags )
}
else if ( iommu_pte_flags )
for ( i = 0; i < (1UL << page_order); i++ )
- iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
- iommu_pte_flags);
+ {
+ rc = iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
+ iommu_pte_flags);
+ if ( unlikely(rc) )
+ {
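+ /* Undo the partially established IOMMU mappings. */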
+ while ( i-- )
+ /* If statement to satisfy __must_check. */
+ if ( iommu_unmap_page(p2m->domain, gfn + i) )
+ continue;
+
+ break;
+ }
+ }
else
for ( i = 0; i < (1UL << page_order); i++ )
- iommu_unmap_page(p2m->domain, gfn + i);
+ {
+ int ret = iommu_unmap_page(p2m->domain, gfn + i);
+
+ if ( !rc )
+ rc = ret;
+ }
}
if ( !paging_mode_translate(p2m->domain) )
{
+ int rc = 0;
+
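+ /* Without P2M translation only the IOMMU mappings need to be removed. */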
if ( need_iommu(p2m->domain) )
+ {
for ( i = 0; i < (1 << page_order); i++ )
- iommu_unmap_page(p2m->domain, mfn + i);
- return 0;
+ {
+ int ret = iommu_unmap_page(p2m->domain, mfn + i);
+
+ if ( !rc )
+ rc = ret;
+ }
+ }
+
+ return rc;
}
ASSERT(gfn_locked_by_me(p2m, gfn));
if ( rc != 0 )
{
while ( i-- > 0 )
- iommu_unmap_page(d, mfn + i);
+ /* If statement to satisfy __must_check. */
+ if ( iommu_unmap_page(d, mfn + i) )
+ continue;
+
return rc;
}
}
if ( i != epfn )
{
while (i-- > old_max)
- iommu_unmap_page(hardware_domain, i);
+ /* If statement to satisfy __must_check. */
+ if ( iommu_unmap_page(hardware_domain, i) )
+ continue;
+
goto destroy_m2p;
}
}
if ( !iommu_passthrough && !need_iommu(d) )
{
+ int rc = 0;
+
/* Set up 1:1 page table for dom0 */
for ( i = 0; i < max_pdx; i++ )
{
* a pfn_valid() check would seem desirable here.
*/
if ( mfn_valid(pfn) )
- amd_iommu_map_page(d, pfn, pfn,
- IOMMUF_readable|IOMMUF_writable);
+ {
+ int ret = amd_iommu_map_page(d, pfn, pfn,
+ IOMMUF_readable|IOMMUF_writable);
+
+ if ( !rc )
+ rc = ret;
+ }
if ( !(i & 0xfffff) )
process_pending_softirqs();
}
+
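+ /* Mapping failures here are logged but don't fail hardware domain setup. */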
+ if ( rc )
+ AMD_IOMMU_DEBUG("d%d: IOMMU mapping failed: %d\n",
+ d->domain_id, rc);
}
for_each_amd_iommu ( iommu )
{
struct page_info *page;
unsigned int i = 0;
+ int rc = 0;
+
page_list_for_each ( page, &d->page_list )
{
unsigned long mfn = page_to_mfn(page);
unsigned long gfn = mfn_to_gmfn(d, mfn);
unsigned int mapping = IOMMUF_readable;
+ int ret;
if ( ((page->u.inuse.type_info & PGT_count_mask) == 0) ||
((page->u.inuse.type_info & PGT_type_mask)
== PGT_writable_page) )
mapping |= IOMMUF_writable;
- hd->platform_ops->map_page(d, gfn, mfn, mapping);
+
+ ret = hd->platform_ops->map_page(d, gfn, mfn, mapping);
+ if ( !rc )
+ rc = ret;
+
if ( !(i++ & 0xfffff) )
process_pending_softirqs();
}
+
+ if ( rc )
+ printk(XENLOG_WARNING "d%d: IOMMU mapping failed: %d\n",
+ d->domain_id, rc);
}
return hd->platform_ops->hwdom_init(d);
for ( i = 0; i < top; i++ )
{
+ int rc = 0;
+
/*
* Set up 1:1 mapping for dom0. Default to use only conventional RAM
* areas and let RMRRs include needed reserved regions. When set, the
tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
for ( j = 0; j < tmp; j++ )
- iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
- IOMMUF_readable|IOMMUF_writable);
+ {
+ int ret = iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
+ IOMMUF_readable|IOMMUF_writable);
+
+ if ( !rc )
+ rc = ret;
+ }
+
+ if ( rc )
+ printk(XENLOG_WARNING VTDPREFIX " d%d: IOMMU mapping failed: %d\n",
+ d->domain_id, rc);
if (!(i & (0xfffff >> (PAGE_SHIFT - PAGE_SHIFT_4K))))
process_pending_softirqs();
#define IOMMUF_readable (1u<<_IOMMUF_readable)
#define _IOMMUF_writable 1
#define IOMMUF_writable (1u<<_IOMMUF_writable)
-int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
- unsigned int flags);
-int iommu_unmap_page(struct domain *d, unsigned long gfn);
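+/* Mapping and unmapping failures must not be ignored by callers. */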
+int __must_check iommu_map_page(struct domain *d, unsigned long gfn,
+ unsigned long mfn, unsigned int flags);
+int __must_check iommu_unmap_page(struct domain *d, unsigned long gfn);
enum iommu_feature
{