if ( lpae_is_valid(orig_pte) && entry->p2m.base != orig_pte.p2m.base )
p2m_free_entry(p2m, orig_pte, level);
- if ( need_iommu(p2m->domain) &&
+ if ( need_iommu_pt_sync(p2m->domain) &&
(lpae_is_valid(orig_pte) || lpae_is_valid(*entry)) )
rc = iommu_iotlb_flush(p2m->domain, _dfn(gfn_x(sgfn)),
1UL << page_order);
void memory_type_changed(struct domain *d)
{
- if ( need_iommu(d) && d->vcpu && d->vcpu[0] )
+ if ( has_iommu_pt(d) && d->vcpu && d->vcpu[0] )
{
p2m_memory_type_changed(d);
flush_all(FLUSH_CACHE);
return MTRR_TYPE_UNCACHABLE;
}
- if ( !need_iommu(d) && !cache_flush_permitted(d) )
+ if ( !has_iommu_pt(d) && !cache_flush_permitted(d) )
{
*ipat = 1;
return MTRR_TYPE_WRBACK;
{
/* Special pages should not be accessible from devices. */
struct domain *d = page_get_owner(page);
- if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
+ if ( d && is_pv_domain(d) && unlikely(need_iommu_pt_sync(d)) )
{
mfn_t mfn = page_to_mfn(page);
case XEN_DOMCTL_MEM_SHARING_CONTROL:
{
rc = 0;
- if ( unlikely(need_iommu(d) && mec->u.enable) )
+ if ( unlikely(has_iommu_pt(d) && mec->u.enable) )
rc = -EXDEV;
else
d->arch.hvm.mem_sharing_enabled = mec->u.enable;
{
if ( iommu_use_hap_pt(d) )
rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
- else if ( need_iommu(d) )
+ else if ( need_iommu_pt_sync(d) )
{
dfn_t dfn = _dfn(gfn);
if ( iommu_old_flags )
amd_iommu_flush_pages(p2m->domain, gfn, page_order);
}
- else if ( need_iommu(p2m->domain) )
+ else if ( need_iommu_pt_sync(p2m->domain) )
{
dfn_t dfn = _dfn(gfn);
{
int rc = 0;
- if ( need_iommu(p2m->domain) )
+ if ( need_iommu_pt_sync(p2m->domain) )
{
dfn_t dfn = _dfn(mfn);
if ( !paging_mode_translate(d) )
{
- if ( need_iommu(d) && t == p2m_ram_rw )
+ if ( need_iommu_pt_sync(d) && t == p2m_ram_rw )
{
dfn_t dfn = _dfn(mfn_x(mfn));
if ( !paging_mode_translate(p2m->domain) )
{
- if ( !need_iommu(d) )
+ if ( !need_iommu_pt_sync(d) )
return 0;
return iommu_map_page(d, _dfn(gfn_l), _mfn(gfn_l),
IOMMUF_readable | IOMMUF_writable);
if ( !paging_mode_translate(d) )
{
- if ( !need_iommu(d) )
+ if ( !need_iommu_pt_sync(d) )
return 0;
return iommu_unmap_page(d, _dfn(gfn_l));
}
{
int ret;
- if ( need_iommu(d) && log_global )
+ if ( has_iommu_pt(d) && log_global )
{
/*
* Refuse to turn on global log-dirty mode
if ( ret )
goto destroy_m2p;
- if ( iommu_enabled && !iommu_hwdom_passthrough &&
- !need_iommu(hardware_domain) )
+ /*
+ * If the hardware domain has IOMMU mappings but its page tables are
+ * neither shared nor kept in sync, then newly added memory needs to
+ * be mapped here.
+ */
+ if ( has_iommu_pt(hardware_domain) &&
+ !iommu_use_hap_pt(hardware_domain) &&
+ !need_iommu_pt_sync(hardware_domain) )
{
for ( i = spfn; i < epfn; i++ )
if ( iommu_map_page(hardware_domain, _dfn(i), _mfn(i),
xatp->gpfn += start;
xatp->size -= start;
-#ifdef CONFIG_HAS_PASSTHROUGH
- if ( need_iommu(d) )
- this_cpu(iommu_dont_flush_iotlb) = 1;
-#endif
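+ /*
+ * No CONFIG_HAS_PASSTHROUGH #ifdef is needed here: has_iommu_pt()
+ * evaluates to false when the IOMMU code is compiled out.
+ */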
+ if ( has_iommu_pt(d) )
+ this_cpu(iommu_dont_flush_iotlb) = 1;
while ( xatp->size > done )
{
}
}
-#ifdef CONFIG_HAS_PASSTHROUGH
- if ( need_iommu(d) )
+ if ( has_iommu_pt(d) )
{
int ret;
if ( unlikely(ret) && rc >= 0 )
rc = ret;
}
-#endif
return rc;
}
/* No paging if iommu is used */
rc = -EMLINK;
- if ( unlikely(need_iommu(d)) )
+ if ( unlikely(has_iommu_pt(d)) )
break;
rc = -EXDEV;
if ( !list_empty(&dev->domain_list) )
goto fail;
- if ( need_iommu(d) <= 0 )
- {
- /*
- * The hwdom is forced to use IOMMU for protecting assigned
- * device. Therefore the IOMMU data is already set up.
- */
- ASSERT(!is_hardware_domain(d));
- rc = iommu_construct(d);
- if ( rc )
- goto fail;
- }
+ /*
+ * The hwdom is forced to use the IOMMU for protecting assigned
+ * devices. Therefore the IOMMU data is already set up.
+ */
+ ASSERT(!is_hardware_domain(d) ||
+ hd->status == IOMMU_STATUS_initialized);
+
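+ /* iommu_construct() returns immediately if the IOMMU is already set up. */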
+ rc = iommu_construct(d);
+ if ( rc )
+ goto fail;
/* The flag field doesn't matter to DT device. */
rc = hd->platform_ops->assign_device(d, 0, dt_to_dev(dev), 0);
void __hwdom_init iommu_hwdom_init(struct domain *d)
{
- const struct domain_iommu *hd = dom_iommu(d);
+ struct domain_iommu *hd = dom_iommu(d);
check_hwdom_reqs(d);
return;
register_keyhandler('o', &iommu_dump_p2m_table, "dump iommu p2m table", 0);
- d->need_iommu = iommu_hwdom_strict;
- if ( need_iommu(d) && !iommu_use_hap_pt(d) )
+
+ hd->status = IOMMU_STATUS_initializing;
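+ /*
+ * The hardware domain only needs its mappings kept in sync in strict
+ * mode, and not when the IOMMU shares the CPU (HAP) page tables.
+ */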
+ hd->need_sync = iommu_hwdom_strict && !iommu_use_hap_pt(d);
+ if ( need_iommu_pt_sync(d) )
{
struct page_info *page;
unsigned int i = 0;
}
hd->platform_ops->hwdom_init(d);
+
+ hd->status = IOMMU_STATUS_initialized;
}
void iommu_teardown(struct domain *d)
{
- const struct domain_iommu *hd = dom_iommu(d);
+ struct domain_iommu *hd = dom_iommu(d);
- d->need_iommu = 0;
+ hd->status = IOMMU_STATUS_disabled;
hd->platform_ops->teardown(d);
tasklet_schedule(&iommu_pt_cleanup_tasklet);
}
int iommu_construct(struct domain *d)
{
- if ( need_iommu(d) > 0 )
+ struct domain_iommu *hd = dom_iommu(d);
+
+ if ( hd->status == IOMMU_STATUS_initialized )
return 0;
if ( !iommu_use_hap_pt(d) )
{
int rc;
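+ /*
+ * Separate IOMMU page tables are in use; they must be populated
+ * and then kept in sync with the CPU-side mappings.
+ */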
+ hd->status = IOMMU_STATUS_initializing;
+ hd->need_sync = true;
+
rc = arch_iommu_populate_page_table(d);
if ( rc )
+ {
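+ /*
+ * Only roll the state back on a hard failure; -ERESTART keeps the
+ * in-progress state so that construction can be continued.
+ */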
+ if ( rc != -ERESTART )
+ {
+ hd->need_sync = false;
+ hd->status = IOMMU_STATUS_disabled;
+ }
+
return rc;
+ }
}
- d->need_iommu = 1;
+ hd->status = IOMMU_STATUS_initialized;
+
/*
* There may be dirty cache lines when a device is assigned
- * and before need_iommu(d) becoming true, this will cause
+ * and before has_iommu_pt(d) becomes true, this will cause
* memory_type_changed lose effect if memory type changes.
* Call memory_type_changed here to amend this.
*/
ops = iommu_get_ops();
for_each_domain(d)
{
- if ( is_hardware_domain(d) || need_iommu(d) <= 0 )
+ if ( is_hardware_domain(d) ||
+ dom_iommu(d)->status < IOMMU_STATUS_initialized )
continue;
if ( iommu_use_hap_pt(d) )
/* Prevent device assign if mem paging or mem sharing have been
* enabled for this domain */
- if ( unlikely(!need_iommu(d) &&
- (d->arch.hvm.mem_sharing_enabled ||
- vm_event_check_ring(d->vm_event_paging) ||
- p2m_get_hostp2m(d)->global_logdirty)) )
+ if ( unlikely(d->arch.hvm.mem_sharing_enabled ||
+ vm_event_check_ring(d->vm_event_paging) ||
+ p2m_get_hostp2m(d)->global_logdirty) )
return -EXDEV;
if ( !pcidevs_trylock() )
}
done:
- if ( !has_arch_pdevs(d) && need_iommu(d) )
+ if ( !has_arch_pdevs(d) && has_iommu_pt(d) )
iommu_teardown(d);
pcidevs_unlock();
pdev->fault.count = 0;
- if ( !has_arch_pdevs(d) && need_iommu(d) )
+ if ( !has_arch_pdevs(d) && has_iommu_pt(d) )
iommu_teardown(d);
return ret;
struct page_info *page;
int rc = 0, n = 0;
- d->need_iommu = -1;
-
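+ /*
+ * The "being set up" state is now expressed by IOMMU_STATUS_initializing,
+ * which the caller sets before invoking this function.
+ */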
this_cpu(iommu_dont_flush_iotlb) = 1;
spin_lock(&d->page_alloc_lock);
gfn_x(((i) >= nr_status_frames(t)) ? INVALID_GFN : (t)->arch.status_gfn[i])
#define gnttab_need_iommu_mapping(d) \
- (is_domain_direct_mapped(d) && need_iommu(d))
+ (is_domain_direct_mapped(d) && need_iommu_pt_sync(d))
#endif /* __ASM_GRANT_TABLE_H__ */
/*
};
/* Always share P2M Table between the CPU and the IOMMU */
-#define iommu_use_hap_pt(d) (need_iommu(d))
+#define iommu_use_hap_pt(d) (has_iommu_pt(d))
const struct iommu_ops *iommu_get_ops(void);
void __init iommu_set_ops(const struct iommu_ops *ops);
#define gnttab_release_host_mappings(domain) ( paging_mode_external(domain) )
#define gnttab_need_iommu_mapping(d) \
- (!paging_mode_translate(d) && need_iommu(d))
+ (!paging_mode_translate(d) && need_iommu_pt_sync(d))
#endif /* __ASM_GRANT_TABLE_H__ */
/* Are we using the domain P2M table as its IOMMU pagetable? */
#define iommu_use_hap_pt(d) \
- (hap_enabled(d) && need_iommu(d) && iommu_hap_pt_share)
+ (hap_enabled(d) && has_iommu_pt(d) && iommu_hap_pt_share)
void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value);
unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg);
bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature);
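+/* Construction status of a domain's IOMMU mappings. */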
+enum iommu_status
+{
+ IOMMU_STATUS_disabled,
+ IOMMU_STATUS_initializing,
+ IOMMU_STATUS_initialized
+};
+
struct domain_iommu {
struct arch_iommu arch;
/* Features supported by the IOMMU */
DECLARE_BITMAP(features, IOMMU_FEAT_count);
+
+ /* Status of guest IOMMU mappings */
+ enum iommu_status status;
+
+ /*
+ * Does the guest require mappings to be synchronized, to maintain
+ * the default dfn == pfn map? (See the comment on dfn at the top of
+ * include/xen/mm.h).
+ */
+ bool need_sync;
};
#define dom_iommu(d) (&(d)->iommu)
#ifdef CONFIG_HAS_PASSTHROUGH
struct domain_iommu iommu;
-
- /* Does this guest need iommu mappings (-1 meaning "being set up")? */
- s8 need_iommu;
#endif
/* is node-affinity automatically computed? */
bool auto_node_affinity;
#define is_pinned_vcpu(v) ((v)->domain->is_pinned || \
cpumask_weight((v)->cpu_hard_affinity) == 1)
#ifdef CONFIG_HAS_PASSTHROUGH
-#define need_iommu(d) ((d)->need_iommu)
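+/*
+ * has_iommu_pt(): the domain has IOMMU page tables (shared or separate).
+ * need_iommu_pt_sync(): separate IOMMU page tables are in use and must
+ * be kept in sync with the CPU-side mappings.
+ */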
+#define has_iommu_pt(d) (dom_iommu(d)->status != IOMMU_STATUS_disabled)
+#define need_iommu_pt_sync(d) (dom_iommu(d)->need_sync)
#else
-#define need_iommu(d) (0)
+#define has_iommu_pt(d) false
+#define need_iommu_pt_sync(d) false
#endif
static inline bool is_vcpu_online(const struct vcpu *v)