static __read_mostly uint32_t xen_cpuid_base;
extern char hypercall_page[];
static struct rangeset *mem;
-static unsigned long __initdata reserved_pages[2];
+static struct platform_bad_page __initdata reserved_pages[2];
DEFINE_PER_CPU(unsigned int, vcpu_id);
panic("Unable to get " #p "\n"); \
mark_pfn_as_ram(e820, pfn); \
ASSERT(i < ARRAY_SIZE(reserved_pages)); \
- reserved_pages[i++] = pfn << PAGE_SHIFT; \
+ reserved_pages[i++].mfn = pfn; \
})
MARK_PARAM_RAM(HVM_PARAM_STORE_PFN);
if ( !pv_console )
    MARK_PARAM_RAM(HVM_PARAM_CONSOLE_PFN);
#undef MARK_PARAM_RAM
}
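Worth spelling out: the hunk above changes units as well as types. The old array stored byte addresses (pfn << PAGE_SHIFT); the struct stores the frame number directly, and .order is left zero by the zero-initialized __initdata array, so each entry still covers exactly one frame. A minimal sketch of the bookkeeping step, with record_reserved_pfn as a hypothetical helper (not part of the patch):

/* Hypothetical helper, equivalent to the macro's bookkeeping step. */
static void __init record_reserved_pfn(struct platform_bad_page *slot,
                                       uint64_t pfn)
{
    slot->mfn = pfn;    /* frame number now; previously pfn << PAGE_SHIFT */
    /* .order stays 0 (zeroed __initdata): 1UL << 0 == one frame. */
}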
-const unsigned long *__init hypervisor_reserved_pages(unsigned int *size)
+const struct platform_bad_page *__init hypervisor_reserved_pages(unsigned int *size)
{
ASSERT(xen_guest);
mem_sharing_get_nr_saved_mfns());
}
-const unsigned long *__init get_platform_badpages(unsigned int *array_size)
+const struct platform_bad_page *__init get_platform_badpages(unsigned int *array_size)
{
u32 igd_id;
- static unsigned long __initdata bad_pages[] = {
- 0x20050000,
- 0x20110000,
- 0x20130000,
- 0x20138000,
- 0x40004000,
+ static const struct platform_bad_page __initconst snb_bad_pages[] = {
+ { .mfn = 0x20050000 >> PAGE_SHIFT },
+ { .mfn = 0x20110000 >> PAGE_SHIFT },
+ { .mfn = 0x20130000 >> PAGE_SHIFT },
+ { .mfn = 0x20138000 >> PAGE_SHIFT },
+ { .mfn = 0x40004000 >> PAGE_SHIFT },
};
- *array_size = ARRAY_SIZE(bad_pages);
+ *array_size = ARRAY_SIZE(snb_bad_pages);
igd_id = pci_conf_read32(0, 0, 2, 0, 0);
- if ( !IS_SNB_GFX(igd_id) )
- return NULL;
+ if ( IS_SNB_GFX(igd_id) )
+ return snb_bad_pages;
- return bad_pages;
+ return NULL;
}
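A hedged usage sketch of the new interface (dump_platform_badpages is illustrative, not in the patch): callers receive an array of { mfn, order } ranges, with a NULL return meaning no quirks apply to this platform and *array_size only meaningful for a non-NULL return.

/* Illustrative only: log each platform bad-page range at boot. */
static void __init dump_platform_badpages(void)
{
    unsigned int i, n;
    const struct platform_bad_page *bad = get_platform_badpages(&n);

    for ( i = 0; bad && i < n; i++ )
        printk("bad pages: MFN %#lx, order %u (%lu frame(s))\n",
               bad[i].mfn, bad[i].order, 1UL << bad[i].order);
}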
void paging_invlpg(struct vcpu *v, unsigned long linear)
unsigned long bad_spfn, bad_epfn;
const char *p;
#ifdef CONFIG_X86
- const unsigned long *badpage = NULL;
+ const struct platform_bad_page *badpage;
unsigned int i, array_size;
BUILD_BUG_ON(8 * sizeof(frame_table->u.free.first_dirty) <
{
for ( i = 0; i < array_size; i++ )
{
- bootmem_region_zap(*badpage >> PAGE_SHIFT,
- (*badpage >> PAGE_SHIFT) + 1);
+ bootmem_region_zap(badpage->mfn,
+ badpage->mfn + (1UL << badpage->order));
badpage++;
}
}
{
for ( i = 0; i < array_size; i++ )
{
- bootmem_region_zap(*badpage >> PAGE_SHIFT,
- (*badpage >> PAGE_SHIFT) + 1);
+ bootmem_region_zap(badpage->mfn,
+ badpage->mfn + (1UL << badpage->order));
badpage++;
}
}
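The order field is what the widened interface buys: one table slot can now retire a naturally aligned power-of-two run of frames instead of a single page. A worked example with a hypothetical entry (not one added by this patch):

/*
 * Hypothetical entry: { .mfn = 0x40000, .order = 10 }
 *   bootmem_region_zap(0x40000, 0x40000 + (1UL << 10))
 *   => frames 0x40000 .. 0x403ff (1024 pages) are withheld from the
 *      boot allocator. The old unsigned long format could only express
 *      single-frame (order-0) entries.
 */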
int hypervisor_alloc_unused_page(mfn_t *mfn);
int hypervisor_free_unused_page(mfn_t mfn);
void hypervisor_fixup_e820(struct e820map *e820);
-const unsigned long *hypervisor_reserved_pages(unsigned int *size);
+const struct platform_bad_page *hypervisor_reserved_pages(unsigned int *size);
uint32_t hypervisor_cpuid_base(void);
void hypervisor_resume(void);
ASSERT_UNREACHABLE();
}
-static inline const unsigned long *hypervisor_reserved_pages(unsigned int *size)
+static inline const struct platform_bad_page *hypervisor_reserved_pages(unsigned int *size)
{
ASSERT_UNREACHABLE();
return NULL;
bool is_iomem_page(mfn_t mfn);
-const unsigned long *get_platform_badpages(unsigned int *array_size);
+struct platform_bad_page {
+ unsigned long mfn;
+ unsigned int order;
+};
+
+const struct platform_bad_page *get_platform_badpages(unsigned int *array_size);
+
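One standard-C detail the table entries lean on, stated here for clarity: members omitted from a designated initializer are zero-filled, so every entry written as { .mfn = x } (such as the SNB table above) keeps its old single-page meaning. Hypothetical illustration:

/* { .mfn = x } == { .mfn = x, .order = 0 }; order 0 means one frame. */
static const struct platform_bad_page __initconst example = {
    .mfn = 0xabcde,     /* hypothetical frame number */
};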
/* Per page locks:
* page_lock() is used for two purposes: pte serialization, and memory sharing.
*