ept_sync_domain(p2m);
/* For host p2m, may need to change VT-d page table.*/
- if ( rc == 0 && p2m_is_hostp2m(p2m) && need_iommu(d) &&
+ if ( rc == 0 && p2m_is_hostp2m(p2m) &&
need_modify_vtd_table )
{
- if ( iommu_hap_pt_share )
+ if ( iommu_use_hap_pt(d) )
rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
- else
+ else if ( need_iommu(d) )
{
dfn_t dfn = _dfn(gfn);
&& (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
- if ( iommu_enabled && need_iommu(p2m->domain) &&
- (iommu_old_flags != iommu_pte_flags || old_mfn != mfn_x(mfn)) )
+ if ( iommu_enabled && (iommu_old_flags != iommu_pte_flags ||
+ old_mfn != mfn_x(mfn)) )
{
ASSERT(rc == 0);
if ( iommu_use_hap_pt(p2m->domain) )
{
if ( iommu_old_flags )
amd_iommu_flush_pages(p2m->domain, gfn, page_order);
}
- else
+ else if ( need_iommu(p2m->domain) )
{
dfn_t dfn = _dfn(gfn);
* - exclude PV guests, should execution reach this code for such.
* So be careful when altering this.
*/
- if ( !need_iommu(d) || !iommu_use_hap_pt(d) ||
+ if ( !iommu_use_hap_pt(d) ||
(start_fn & ((1UL << PAGE_ORDER_2M) - 1)) || !(nr >> PAGE_ORDER_2M) )
return PAGE_ORDER_4K;
void iommu_share_p2m_table(struct domain* d)
{
- if ( iommu_enabled && iommu_use_hap_pt(d) )
+ if ( iommu_use_hap_pt(d) )
iommu_get_ops()->share_p2m(d);
}
};
/* Always share P2M Table between the CPU and the IOMMU */
-#define iommu_use_hap_pt(d) (1)
+#define iommu_use_hap_pt(d) (need_iommu(d))
const struct iommu_ops *iommu_get_ops(void);
void __init iommu_set_ops(const struct iommu_ops *ops);
return -ENODEV;
}
-/* Does this domain have a P2M table we can use as its IOMMU pagetable? */
-#define iommu_use_hap_pt(d) (hap_enabled(d) && iommu_hap_pt_share)
+/* Are we using the domain P2M table as its IOMMU pagetable? */
+#define iommu_use_hap_pt(d) \
+ (hap_enabled(d) && need_iommu(d) && iommu_hap_pt_share)
void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value);
unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg);