return ptr;
}
-static unsigned int iommu_next_level(u32 *entry)
-{
- return get_field_from_reg_u32(entry[0],
- IOMMU_PDE_NEXT_LEVEL_MASK,
- IOMMU_PDE_NEXT_LEVEL_SHIFT);
-}
-
-static int amd_iommu_is_pte_present(u32 *entry)
-{
- return get_field_from_reg_u32(entry[0],
- IOMMU_PDE_PRESENT_MASK,
- IOMMU_PDE_PRESENT_SHIFT);
-}
-
/* For each pde, we use the ignored bits (bit 1 - bit 8 and bit 63)
 * to save a pde count; a pde count of 511 makes the pde a candidate
 * for page coalescing.
 */
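For reference, a minimal sketch of how a 9-bit pde count can be packed
into those ignored bits (helper names and exact layout are illustrative
assumptions, not taken from this patch; u64 is Xen's usual typedef):

    static unsigned int get_pde_count(u64 pde)
    {
        /* Low 8 bits of the count sit in pde bits 1-8, the 9th in bit 63. */
        return ((pde >> 1) & 0xFF) | ((pde >> 55) & 0x100);
    }

    static void set_pde_count(u64 *pde, unsigned int count)
    {
        /* Clear the ignored bits, then store the 9-bit count (max 511). */
        *pde &= ~((0xFFULL << 1) | (1ULL << 63));
        *pde |= ((u64)(count & 0xFF) << 1) | ((u64)(count & 0x100) << 55);
    }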
>> PAGE_SHIFT;
/* Split super page frame into smaller pieces. */
- if ( amd_iommu_is_pte_present((u32*)pde) &&
+ if ( iommu_is_pte_present((u32*)pde) &&
(iommu_next_level((u32*)pde) == 0) &&
next_table_mfn != 0 )
{
}
/* Install lower level page table for non-present entries */
- else if ( !amd_iommu_is_pte_present((u32*)pde) )
+ else if ( !iommu_is_pte_present((u32*)pde) )
{
if ( next_table_mfn == 0 )
{
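The two branches above encode the next-level rule: a present pde whose
next-level field is zero is a leaf, i.e. it maps a (super)page directly
at this level, so it must be split before a finer-grained table can be
installed beneath it, while a non-present pde simply receives a fresh
lower-level table. A hypothetical wrapper for the first check (not part
of this patch) would read:

    /* Illustration only: present + next level 0 means a leaf mapping. */
    static inline int pde_maps_superpage(const u32 *pde)
    {
        return iommu_is_pte_present(pde) && (iommu_next_level(pde) == 0);
    }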
{
void *table_vaddr, *pde;
u64 next_table_maddr;
- int index, next_level, present;
- u32 *entry;
+ int index;
+ unsigned int next_level;
table_vaddr = __map_domain_page(pg);
{
pde = table_vaddr + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
- entry = (u32*)pde;
-
- next_level = get_field_from_reg_u32(entry[0],
- IOMMU_PDE_NEXT_LEVEL_MASK,
- IOMMU_PDE_NEXT_LEVEL_SHIFT);
- present = get_field_from_reg_u32(entry[0],
- IOMMU_PDE_PRESENT_MASK,
- IOMMU_PDE_PRESENT_SHIFT);
+ next_level = iommu_next_level((u32*)pde);
if ( (next_table_maddr != 0) && (next_level != 0)
- && present )
+ && iommu_is_pte_present((u32*)pde) )
{
deallocate_next_page_table(
maddr_to_page(next_table_maddr), level - 1);
IOMMU_REG_BASE_ADDR_HIGH_SHIFT, reg);
}
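+
+/* Extract the present bit from the low 32 bits of a pte/pde. */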
+static inline int iommu_is_pte_present(const u32 *entry)
+{
+ return get_field_from_reg_u32(entry[0],
+ IOMMU_PDE_PRESENT_MASK,
+ IOMMU_PDE_PRESENT_SHIFT);
+}
+
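+/* Extract the next-level field from the low 32 bits of a pde. */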
+static inline unsigned int iommu_next_level(const u32 *entry)
+{
+ return get_field_from_reg_u32(entry[0],
+ IOMMU_PDE_NEXT_LEVEL_MASK,
+ IOMMU_PDE_NEXT_LEVEL_SHIFT);
+}
+
#endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */
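Both helpers build on get_field_from_reg_u32(); as a rough sketch of the
assumed semantics, it is the usual mask-and-shift field accessor:

    static u32 get_field_from_reg_u32(u32 reg_value, u32 mask, u32 shift)
    {
        /* Isolate the field with the mask, then shift it down to bit 0. */
        return (reg_value & mask) >> shift;
    }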