     return rc;
 }
+int iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
+                      unsigned int *flags)
+{
+    const struct domain_iommu *hd = dom_iommu(d);
+
+    if ( !iommu_enabled || !hd->platform_ops )
+        return -EOPNOTSUPP;
+
+    return hd->platform_ops->lookup_page(d, dfn, mfn, flags);
+}
+
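For context, a hypothetical caller of the new wrapper might look like the sketch below. The helper name, the emulation scenario and the -EACCES handling are illustrative assumptions, not part of this patch; only iommu_lookup_page() and the IOMMUF_* flags come from the code above.

/*
 * Illustrative sketch only: translate a DFN to an MFN before emulating a
 * device read, failing the emulation if the mapping is absent or not
 * readable.
 */
static int demo_translate_dfn(struct domain *d, dfn_t dfn, mfn_t *mfn)
{
    unsigned int flags;
    int rc = iommu_lookup_page(d, dfn, mfn, &flags);

    if ( rc )
        return rc; /* -EOPNOTSUPP, -ENOMEM or -ENOENT from the lookup */

    /* The emulated access is a read, so require read permission. */
    if ( !(flags & IOMMUF_readable) )
        return -EACCES;

    return 0;
}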
 static void iommu_free_pagetables(unsigned long unused)
 {
     do {
     return dma_pte_clear_one(d, dfn_to_daddr(dfn));
 }
+static int __must_check intel_iommu_lookup_page(struct domain *d, dfn_t dfn,
+                                                mfn_t *mfn,
+                                                unsigned int *flags)
+{
+    struct domain_iommu *hd = dom_iommu(d);
+    struct dma_pte *page, val;
+    u64 pg_maddr;
+
+    /*
+     * If VT-d shares the EPT page table, or if the domain is the hardware
+     * domain and iommu_hwdom_passthrough is set, there is no VT-d-maintained
+     * page table to consult, so the lookup cannot be performed.
+     */
+    if ( iommu_use_hap_pt(d) ||
+         (iommu_hwdom_passthrough && is_hardware_domain(d)) )
+        return -EOPNOTSUPP;
+
+    spin_lock(&hd->arch.mapping_lock);
+
+    pg_maddr = addr_to_dma_page_maddr(d, dfn_to_daddr(dfn), 0);
+    if ( !pg_maddr )
+    {
+        spin_unlock(&hd->arch.mapping_lock);
+        return -ENOMEM;
+    }
+
+    page = map_vtd_domain_page(pg_maddr);
+    val = page[dfn_x(dfn) & LEVEL_MASK];
+
+    unmap_vtd_domain_page(page);
+    spin_unlock(&hd->arch.mapping_lock);
+
+    if ( !dma_pte_present(val) )
+        return -ENOENT;
+
+    *mfn = maddr_to_mfn(dma_pte_addr(val));
+    *flags = dma_pte_read(val) ? IOMMUF_readable : 0;
+    *flags |= dma_pte_write(val) ? IOMMUF_writable : 0;
+
+    return 0;
+}
+
 int iommu_pte_flush(struct domain *d, uint64_t dfn, uint64_t *pte,
                     int order, int present)
 {
     .teardown = iommu_domain_teardown,
     .map_page = intel_iommu_map_page,
     .unmap_page = intel_iommu_unmap_page,
+    .lookup_page = intel_iommu_lookup_page,
     .free_page_table = iommu_free_page_table,
     .reassign_device = reassign_device_ownership,
     .get_device_group_id = intel_iommu_group_id,
 #define dma_set_pte_prot(p, prot) do { \
         (p).val = ((p).val & ~DMA_PTE_PROT) | ((prot) & DMA_PTE_PROT); \
     } while (0)
+#define dma_pte_prot(p) ((p).val & DMA_PTE_PROT)
+#define dma_pte_read(p) (dma_pte_prot(p) & DMA_PTE_READ)
+#define dma_pte_write(p) (dma_pte_prot(p) & DMA_PTE_WRITE)
 #define dma_pte_addr(p) ((p).val & PADDR_MASK & PAGE_MASK_4K)
 #define dma_set_pte_addr(p, addr) do {\
             (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
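As a stand-alone illustration of the new accessor macros, the fragment below decodes a PTE's protection bits into the generic IOMMUF_* flags, mirroring what intel_iommu_lookup_page() does above. The helper name is hypothetical and the code is a sketch, not part of the patch.

/* Illustrative sketch only: map VT-d PTE protection bits to IOMMUF_* flags. */
static unsigned int demo_pte_to_iommuf_flags(struct dma_pte pte)
{
    unsigned int flags = 0;

    if ( dma_pte_read(pte) )
        flags |= IOMMUF_readable;
    if ( dma_pte_write(pte) )
        flags |= IOMMUF_writable;

    return flags;
}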
 int __must_check iommu_map_page(struct domain *d, dfn_t dfn,
                                 mfn_t mfn, unsigned int flags);
 int __must_check iommu_unmap_page(struct domain *d, dfn_t dfn);
+int __must_check iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
+                                   unsigned int *flags);
 enum iommu_feature
 {
 #endif /* HAS_PCI */
     void (*teardown)(struct domain *d);
+
+    /*
+     * This block of operations must be appropriately locked against each
+     * other by the caller in order to have meaningful results.
+     */
     int __must_check (*map_page)(struct domain *d, dfn_t dfn, mfn_t mfn,
                                  unsigned int flags);
     int __must_check (*unmap_page)(struct domain *d, dfn_t dfn);
+    int __must_check (*lookup_page)(struct domain *d, dfn_t dfn, mfn_t *mfn,
+                                    unsigned int *flags);
+
     void (*free_page_table)(struct page_info *);
 #ifdef CONFIG_X86
     void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);