             iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
         else
         {
-            if ( p2mt == p2m_ram_rw )
+            unsigned int flags = p2m_get_iommu_flags(p2mt);
+
+            if ( flags != 0 )
                 for ( i = 0; i < (1 << order); i++ )
-                    iommu_map_page(d, gfn + i, mfn_x(mfn) + i,
-                                   IOMMUF_readable | IOMMUF_writable);
+                    iommu_map_page(d, gfn + i, mfn_x(mfn) + i, flags);
             else
                 for ( i = 0; i < (1 << order); i++ )
                     iommu_unmap_page(d, gfn + i);
     l2_pgentry_t l2e_content;
     l3_pgentry_t l3e_content;
     int rc;
-    unsigned int iommu_pte_flags = (p2mt == p2m_ram_rw) ?
-                                   IOMMUF_readable|IOMMUF_writable:
-                                   0;
+    unsigned int iommu_pte_flags = p2m_get_iommu_flags(p2mt);
     unsigned long old_mfn = 0;
     if ( tb_init_done )
         }
         else
         {
-            if ( p2mt == p2m_ram_rw )
+            unsigned int flags = p2m_get_iommu_flags(p2mt);
+
+            if ( flags != 0 )
                 for ( i = 0; i < (1UL << page_order); i++ )
-                    iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i,
-                                   IOMMUF_readable|IOMMUF_writable);
+                    iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i, flags);
             else
                 for ( i = 0; i < (1UL << page_order); i++ )
                     iommu_unmap_page(p2m->domain, gfn+i);
 void nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
     l1_pgentry_t *p, l1_pgentry_t new, unsigned int level);
+/*
+ * p2m type to IOMMU flags
+ */
+static inline unsigned int p2m_get_iommu_flags(p2m_type_t p2mt)
+{
+    unsigned int flags;
+
+    switch( p2mt )
+    {
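+    /*
+     * Grant mappings, foreign mappings and log-dirty RAM get the same
+     * read/write DMA access as ordinary RAM; read-only types get a
+     * read-only mapping; all other types are left unmapped in the IOMMU.
+     */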
+    case p2m_ram_rw:
+    case p2m_grant_map_rw:
+    case p2m_ram_logdirty:
+    case p2m_map_foreign:
+        flags = IOMMUF_readable | IOMMUF_writable;
+        break;
+    case p2m_ram_ro:
+    case p2m_grant_map_ro:
+        flags = IOMMUF_readable;
+        break;
+    default:
+        flags = 0;
+        break;
+    }
+
+    return flags;
+}
+
 #endif /* _XEN_P2M_H */
 /*