static int mem_hotadd_check(unsigned long spfn, unsigned long epfn)
{
unsigned long s, e, length, sidx, eidx;
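+ /* The hotplug range expressed as (base address, number of pages), the
+ * form taken by the new pdx_is_region_compressible() helper. */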
+ paddr_t mem_base = pfn_to_paddr(spfn);
+ unsigned long mem_npages = epfn - spfn;
if ( (spfn >= epfn) )
return 0;
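+ /* Both boundaries must be aligned to a superpage, i.e. 2^PAGETABLE_ORDER pages. */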
if ( (spfn | epfn) & ((1UL << PAGETABLE_ORDER) - 1) )
return 0;
- if ( (spfn | epfn) & pfn_hole_mask )
+ if ( !pdx_is_region_compressible(mem_base, mem_npages) )
return 0;
/* Make sure the new range is not present now */
length += (e - s) * sizeof(struct page_info);
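+ /* The frame table for the new range must not need more pages than the
+ * range itself contributes. */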
- if ((length >> PAGE_SHIFT) > (epfn - spfn))
+ if ( (length >> PAGE_SHIFT) > mem_npages )
return 0;
return 1;
}
#include <xen/multiboot.h>
#include <xen/param.h>
#include <xen/pci_regs.h>
+#include <xen/pdx.h>
#include <xen/pfn.h>
#if EFI_PAGE_SIZE != PAGE_SIZE
# error Cannot use xen/pfn.h here!
#endif
static bool __init cf_check ram_range_valid(unsigned long smfn, unsigned long emfn)
{
+ paddr_t ram_base = pfn_to_paddr(smfn);
+ unsigned long ram_npages = emfn - smfn;
unsigned long sz = pfn_to_pdx(emfn - 1) / PDX_GROUP_COUNT + 1;
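+ /* The range must be compressible, and at least one PDX group covering
+ * it must already be marked valid. */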
- return !(smfn & pfn_hole_mask) &&
+ return pdx_is_region_compressible(ram_base, ram_npages) &&
find_next_bit(pdx_group_valid, sz,
pfn_to_pdx(smfn) / PDX_GROUP_COUNT) < sz;
}
u64 len = desc->NumberOfPages << EFI_PAGE_SHIFT;
unsigned long smfn, emfn;
unsigned int prot = PAGE_HYPERVISOR_RWX;
+ paddr_t mem_base;
+ unsigned long mem_npages;
printk(XENLOG_INFO " %013" PRIx64 "-%013" PRIx64
" type=%u attr=%016" PRIx64 "\n",
desc->PhysicalStart, desc->PhysicalStart + len - 1,
desc->Type, desc->Attribute);
smfn = PFN_DOWN(desc->PhysicalStart);
emfn = PFN_UP(desc->PhysicalStart + len);
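+ /* The descriptor's range in the (base, number of pages) form the pdx helper takes. */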
+ mem_base = pfn_to_paddr(smfn);
+ mem_npages = emfn - smfn;
+
if ( desc->Attribute & EFI_MEMORY_WB )
prot |= _PAGE_WB;
else if ( desc->Attribute & EFI_MEMORY_WT )
prot |= _PAGE_WT | MAP_SMALL_PAGES;

if ( desc->Attribute & EFI_MEMORY_XP )
prot |= _PAGE_NX;
if ( pfn_to_pdx(emfn - 1) < (DIRECTMAP_SIZE >> PAGE_SHIFT) &&
- !(smfn & pfn_hole_mask) &&
- !((smfn ^ (emfn - 1)) & ~pfn_pdx_bottom_mask) )
+ pdx_is_region_compressible(mem_base, mem_npages) )
{
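+ /* Mappings extending past HYPERVISOR_VIRT_END must not be marked global. */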
if ( (unsigned long)mfn_to_virt(emfn - 1) >= HYPERVISOR_VIRT_END )
prot &= ~_PAGE_GLOBAL;
}
/* Sets all bits from the most-significant 1-bit down to the LSB */
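+ /* E.g. fill_mask(0x00f00000) yields 0x00ffffff. */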
-static uint64_t __init fill_mask(uint64_t mask)
+static uint64_t fill_mask(uint64_t mask)
{
while (mask & (mask + 1))
mask |= mask + 1;
return mask;
}
+bool pdx_is_region_compressible(paddr_t base, unsigned long npages)
+{
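+ /*
+ * The base must not have any of the PFN bits elided by compression
+ * (pfn_hole_mask) set, and no address bit that varies across the region
+ * may reach above the uncompressed low bits (ma_va_bottom_mask).
+ */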
+ return !(paddr_to_pfn(base) & pfn_hole_mask) &&
+ !(pdx_region_mask(base, npages * PAGE_SIZE) & ~ma_va_bottom_mask);
+}
+
/* We don't want to compress the low MAX_ORDER bits of the addresses. */
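+ /* E.g. assuming MAX_ORDER = 20 and 4KiB pages, bits 0-31 are never compressed. */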
uint64_t __init pdx_init_mask(uint64_t base_addr)
{
return fill_mask(max(base_addr,
(uint64_t)1 << (MAX_ORDER + PAGE_SHIFT)) - 1);
}
-uint64_t __init pdx_region_mask(uint64_t base, uint64_t len)
+uint64_t pdx_region_mask(uint64_t base, uint64_t len)
{
/*
* We say a bit "moves" in a range if there exist 2 addresses in that
* range that differ in that bit.
*/
return fill_mask(base ^ (base + len - 1));
}
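+ /* Illustrative example: for base = 0x100000000 and len = 0x80000000 the
+ * last address is 0x17fffffff, so base ^ last = 0x7fffffff and fill_mask()
+ * returns it unchanged: bits 0-30 "move" within the region. */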
#define PDX_GROUP_COUNT ((1 << PDX_GROUP_SHIFT) / \
(sizeof(*frame_table) & -sizeof(*frame_table)))
extern unsigned long pdx_group_valid[];
+/**
+ * Validate a region's compatibility with the current compression runtime
+ *
+ * @param base Base address of the region
+ * @param npages Number of PAGE_SIZE-sized pages in the region
+ * @return True iff the region can be used with the current compression
+ */
+bool pdx_is_region_compressible(paddr_t base, unsigned long npages);
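+ /*
+ * Typical use, mirroring mem_hotadd_check() above:
+ * if ( !pdx_is_region_compressible(pfn_to_paddr(spfn), epfn - spfn) )
+ * return 0;
+ */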
+
/**
* Calculates a mask covering "moving" bits of all addresses of a region
*