amd_iommu_get_paging_mode() expects a count, not a "maximum possible"
value. Prior to
b4f042236ae0 dropping the reference, the use of our mis-
named "max_page" in amd_iommu_domain_init() may have lead to such a
misunderstanding. In an attempt to avoid such confusion in the future,
rename the function's parameter and - while at it - convert it to an
inline function.
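
Purely for illustration (not part of the patch), a minimal stand-alone
sketch of the level computation, using local stand-ins for the
PTE_PER_TABLE_* constants, shows why a frame count rather than a highest
frame number has to be passed - right at a 512^n boundary the two differ
by a whole level:

#include <stdio.h>

#define PTE_PER_TABLE_SHIFT 9
#define PTE_PER_TABLE_SIZE  (1UL << PTE_PER_TABLE_SHIFT)
#define PTE_PER_TABLE_ALIGN(ent) \
    (((ent) + PTE_PER_TABLE_SIZE - 1) & ~(PTE_PER_TABLE_SIZE - 1))

static int get_paging_mode(unsigned long max_frames)
{
    int level = 1;

    while ( max_frames > PTE_PER_TABLE_SIZE )
    {
        max_frames = PTE_PER_TABLE_ALIGN(max_frames) >> PTE_PER_TABLE_SHIFT;
        if ( ++level > 6 )
            return -1;
    }

    return level;
}

int main(void)
{
    /* Assume the highest populated frame sits right at the 512G boundary. */
    unsigned long upper_bound = 1UL << 27;

    /* Passing the bound itself yields 3 levels - frame 2^27 is unreachable. */
    printf("bound passed as-is: %d levels\n", get_paging_mode(upper_bound));
    /* Passing the count (bound + 1) yields the 4 levels actually needed. */
    printf("count (bound + 1):  %d levels\n", get_paging_mode(upper_bound + 1));

    return 0;
}
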
Also replace a literal 4 by an expression tying it to a more widely used
constant, just like amd_iommu_quarantine_init() does.
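
As a purely hypothetical sanity check (again not part of the patch,
assuming DEFAULT_DOMAIN_ADDRESS_WIDTH of 48 and PAGE_SHIFT of 12, as on
x86): the expression yields a count of 2^36 = 512^4 frames, which the
loop above resolves to exactly level 4, i.e. the same value as the
literal, but now visibly derived from the address-width constant:

#include <stdio.h>

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
#define PAGE_SHIFT                   12
#define PTE_PER_TABLE_SHIFT          9

int main(void)
{
    unsigned long long frames =
        1ULL << (DEFAULT_DOMAIN_ADDRESS_WIDTH - PAGE_SHIFT);
    int level = 1;

    /* Every additional level multiplies the reach by 2^PTE_PER_TABLE_SHIFT. */
    while ( frames > (1ULL << PTE_PER_TABLE_SHIFT) )
    {
        frames >>= PTE_PER_TABLE_SHIFT;   /* power of two, already aligned */
        ++level;
    }

    printf("level = %d\n", level);   /* prints 4 */

    return 0;
}
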
Fixes: ea38867831da ("x86 / iommu: set up a scratch page in the quarantine domain")
Fixes: b4f042236ae0 ("AMD/IOMMU: Cease using a dynamic height for the IOMMU pagetables")
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
master commit: b75b3c62fe4afe381c6f74a07f614c0b39fe2f5d
master date: 2020-03-16 11:24:29 +0100
int __init amd_iommu_quarantine_init(struct domain *d)
{
struct domain_iommu *hd = dom_iommu(d);
- unsigned long max_gfn =
- PFN_DOWN((1ul << DEFAULT_DOMAIN_ADDRESS_WIDTH) - 1);
- unsigned int level = amd_iommu_get_paging_mode(max_gfn);
+ unsigned long end_gfn =
+ 1ul << (DEFAULT_DOMAIN_ADDRESS_WIDTH - PAGE_SHIFT);
+ unsigned int level = amd_iommu_get_paging_mode(end_gfn);
uint64_t *table;
if ( hd->arch.root_table )
return rc;
}
-int amd_iommu_get_paging_mode(unsigned long entries)
-{
- int level = 1;
-
- BUG_ON( !entries );
-
- while ( entries > PTE_PER_TABLE_SIZE )
- {
- entries = PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT;
- if ( ++level > 6 )
- return -ENOMEM;
- }
-
- return level;
-}
-
static int amd_iommu_domain_init(struct domain *d)
{
struct domain_iommu *hd = dom_iommu(d);
* physical address space we give it, but this isn't known yet so use 4
* unilaterally.
*/
- hd->arch.paging_mode = is_hvm_domain(d)
- ? IOMMU_PAGING_MODE_LEVEL_4
- : amd_iommu_get_paging_mode(get_upper_mfn_bound());
+ hd->arch.paging_mode = amd_iommu_get_paging_mode(
+ is_hvm_domain(d)
+ ? 1ul << (DEFAULT_DOMAIN_ADDRESS_WIDTH - PAGE_SHIFT)
+ : get_upper_mfn_bound() + 1);
return 0;
}
int amd_iommu_init(void);
int amd_iommu_update_ivrs_mapping_acpi(void);
-int amd_iommu_get_paging_mode(unsigned long entries);
int amd_iommu_quarantine_init(struct domain *d);
/* mapping functions */
return (PAGE_ALIGN(addr + size) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
}
+static inline int amd_iommu_get_paging_mode(unsigned long max_frames)
+{
+ int level = 1;
+
+ BUG_ON(!max_frames);
+
+ while ( max_frames > PTE_PER_TABLE_SIZE )
+ {
+ max_frames = PTE_PER_TABLE_ALIGN(max_frames) >> PTE_PER_TABLE_SHIFT;
+ if ( ++level > 6 )
+ return -ENOMEM;
+ }
+
+ return level;
+}
+
static inline struct page_info* alloc_amd_iommu_pgtable(void)
{
struct page_info *pg;