if ( dma_pte_addr(*pte) == 0 )
{
pg = alloc_domheap_page(NULL);
- vaddr = map_domain_page(mfn_x(page_to_mfn(pg)));
+ vaddr = map_domain_page(page_to_mfn(pg));
if ( !vaddr )
{
spin_unlock_irqrestore(&hd->mapping_lock, flags);
@@ ... @@
else
{
pg = maddr_to_page(pte->val);
- vaddr = map_domain_page(mfn_x(page_to_mfn(pg)));
+ vaddr = map_domain_page(page_to_mfn(pg));
if ( !vaddr )
{
spin_unlock_irqrestore(&hd->mapping_lock, flags);
@@ ... @@
if ( level == total )
return pg;
- parent = map_domain_page(mfn_x(page_to_mfn(pg)));
+ parent = map_domain_page(page_to_mfn(pg));
total--;
}
@@ ... @@
pg = dma_addr_level_page(domain, addr, 1);
if ( !pg )
return;
- pte = (struct dma_pte *)map_domain_page(mfn_x(page_to_mfn(pg)));
+ pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
pte += address_level_offset(addr, 1);
if ( pte )
{
@@ ... @@
pg = dma_addr_level_page(domain, tmp, level);
if ( !pg )
return;
- pte = (struct dma_pte *)map_domain_page(mfn_x(page_to_mfn(pg)));
+ pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
pte += address_level_offset(tmp, level);
dma_clear_pte(*pte);
iommu_flush_cache_entry(iommu, pte);
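Each PTE update in these hunks is followed by iommu_flush_cache_entry(). The likely reason, for context: VT-d hardware only snoops the CPU cache during page-table walks when its extended capability register reports coherency (ECAP.C); otherwise software must write modified entries back to memory itself. Below is a simplified stand-in for that writeback (flush_pte_lines is a made-up name for illustration, and the 64-byte cache line size is an assumption; the real routine checks the ecap bit and the probed line size):

    /* Simplified stand-in for the cache writeback that
     * iommu_flush_cache_entry() performs when the IOMMU page walk is
     * not snoop-coherent (VT-d ECAP.C == 0). The 64-byte line size is
     * an assumption; upstream probes it instead. */
    static void flush_pte_lines(const void *addr, unsigned int size)
    {
        unsigned int i;

        for ( i = 0; i < size; i += 64 )
            asm volatile ( "clflush %0"
                           :: "m" (((const char *)addr)[i]) : "memory" );
    }
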
pg = addr_to_dma_page(d, gfn << PAGE_SHIFT_4K);
if ( !pg )
return -ENOMEM;
- pte = (struct dma_pte *)map_domain_page(mfn_x(page_to_mfn(pg)));
+ pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
pte += gfn & LEVEL_MASK;
dma_set_pte_addr(*pte, mfn << PAGE_SHIFT_4K);
dma_set_pte_prot(*pte, DMA_PTE_READ | DMA_PTE_WRITE);
@@ ... @@
pg = addr_to_dma_page(domain, iova + PAGE_SIZE_4K * index);
if ( !pg )
return -ENOMEM;
- pte = (struct dma_pte *)map_domain_page(mfn_x(page_to_mfn(pg)));
+ pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
pte += start_pfn & LEVEL_MASK;
dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
dma_set_pte_prot(*pte, prot);
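
Every hunk above makes the same mechanical change: map_domain_page() is handed the value page_to_mfn() returns directly, with the mfn_x() unwrapping dropped. That only compiles once map_domain_page() itself takes a typed MFN rather than a raw unsigned long. A minimal sketch of the type-safe pattern this relies on, using simplified stand-ins for the wrapper and accessors (in Xen these are generated by the TYPE_SAFE() machinery, so treat the exact definitions below as illustrative):

    /* Illustrative stand-ins for Xen's typed-MFN machinery; the real
     * mfn_t, _mfn() and mfn_x() come from the TYPE_SAFE() macro. */
    typedef struct { unsigned long mfn; } mfn_t;

    static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }
    static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }

    /*
     * Old prototype:  void *map_domain_page(unsigned long mfn);
     * so callers unwrapped first:
     *     vaddr = map_domain_page(mfn_x(page_to_mfn(pg)));
     *
     * New prototype takes the typed value:
     */
    void *map_domain_page(mfn_t mfn);

With the typed prototype, accidentally passing some other raw unsigned long (a gfn, a loop index) where an mfn_t is expected becomes a compile-time error instead of a silent mis-mapping.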