    dte[1] = entry;
}
-u64 amd_iommu_get_next_table_from_pte(u32 *entry)
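+/*
+ * Extract the page-aligned address held in the low and high 32-bit words
+ * of a page table entry.
+ */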
+uint64_t amd_iommu_get_address_from_pte(void *pte)
{
-    u64 addr_lo, addr_hi, ptr;
+    uint32_t *entry = pte;
+    uint64_t addr_lo, addr_hi, ptr;
-    addr_lo = get_field_from_reg_u32(
-        entry[0],
-        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
-        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT);
+    addr_lo = get_field_from_reg_u32(entry[0],
+                                     IOMMU_PTE_ADDR_LOW_MASK,
+                                     IOMMU_PTE_ADDR_LOW_SHIFT);
-    addr_hi = get_field_from_reg_u32(
-        entry[1],
-        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
-        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT);
+    addr_hi = get_field_from_reg_u32(entry[1],
+                                     IOMMU_PTE_ADDR_HIGH_MASK,
+                                     IOMMU_PTE_ADDR_HIGH_SHIFT);
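+    /* addr_lo is in page-frame units; addr_hi is address bits 32 and up. */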
    ptr = (addr_hi << 32) | (addr_lo << PAGE_SHIFT);
    return ptr;
    pde = table + pfn_to_pde_idx(dfn, merge_level);
    /* get page table of next level */
-    ntable_maddr = amd_iommu_get_next_table_from_pte((u32*)pde);
+    ntable_maddr = amd_iommu_get_address_from_pte(pde);
    ntable = map_domain_page(_mfn(paddr_to_pfn(ntable_maddr)));
    /* get the first mfn of next level */
-    first_mfn = amd_iommu_get_next_table_from_pte((u32*)ntable) >> PAGE_SHIFT;
+    first_mfn = amd_iommu_get_address_from_pte(ntable) >> PAGE_SHIFT;
    if ( first_mfn == 0 )
        goto out;
    pde = table + pfn_to_pde_idx(dfn, merge_level);
    /* get first mfn */
-    ntable_mfn = amd_iommu_get_next_table_from_pte((u32*)pde) >> PAGE_SHIFT;
+    ntable_mfn = amd_iommu_get_address_from_pte(pde) >> PAGE_SHIFT;
    if ( ntable_mfn == 0 )
    {
    }
    ntable = map_domain_page(_mfn(ntable_mfn));
-    first_mfn = amd_iommu_get_next_table_from_pte((u32*)ntable) >> PAGE_SHIFT;
+    first_mfn = amd_iommu_get_address_from_pte(ntable) >> PAGE_SHIFT;
    if ( first_mfn == 0 )
    {
        pde = next_table_vaddr + pfn_to_pde_idx(dfn, level);
        /* Here might be a super page frame */
-        next_table_mfn = amd_iommu_get_next_table_from_pte((uint32_t*)pde)
-                         >> PAGE_SHIFT;
+        next_table_mfn = amd_iommu_get_address_from_pte(pde) >> PAGE_SHIFT;
        /* Split super page frame into smaller pieces.*/
        if ( iommu_is_pte_present((u32*)pde) &&
mfn_x(pgd_mfn));
}
}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
    for ( index = 0; index < PTE_PER_TABLE_SIZE; index++ )
    {
        pde = table_vaddr + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
-        next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
-        next_level = iommu_next_level((u32*)pde);
+        next_table_maddr = amd_iommu_get_address_from_pte(pde);
+        next_level = iommu_next_level(pde);
        if ( (next_table_maddr != 0) && (next_level != 0) &&
-             iommu_is_pte_present((u32*)pde) )
+             iommu_is_pte_present(pde) )
        {
            /* We do not support skip levels yet */
            ASSERT(next_level == level - 1);
            process_pending_softirqs();
        pde = table_vaddr + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
-        next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
-        entry = (u32*)pde;
+        next_table_maddr = amd_iommu_get_address_from_pte(pde);
+        entry = pde;
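+        /* The present bit is in the low 32-bit word of the entry. */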
        present = get_field_from_reg_u32(entry[0],
                                         IOMMU_PDE_PRESENT_MASK,
int __must_check amd_iommu_map_page(struct domain *d, dfn_t dfn,
                                    mfn_t mfn, unsigned int flags);
int __must_check amd_iommu_unmap_page(struct domain *d, dfn_t dfn);
-u64 amd_iommu_get_next_table_from_pte(u32 *entry);
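+/* Returns a byte address, not a frame number. */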
+uint64_t amd_iommu_get_address_from_pte(void *entry);
int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
                                       u64 phys_addr, unsigned long size,