#include <asm/processor.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
+#include <asm/guest.h>
/*
 * opt_mem: Limit maximum address of physical RAM.
    machine_specific_memory_setup(raw);
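+    /* Fix up the memory map when running virtualized on top of Xen. */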
+    if ( xen_guest )
+        hypervisor_fixup_e820(&e820);
+
    printk("%s RAM map:\n", str);
    print_e820_memory_map(e820.map, e820.nr_map);
#include <asm/processor.h>
#include <public/arch-x86/cpuid.h>
+#include <public/hvm/params.h>
bool __read_mostly xen_guest;
static __read_mostly uint32_t xen_cpuid_base;
extern char hypercall_page[];
static struct rangeset *mem;
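+/*
+ * Addresses of the pages marked as RAM by hypervisor_fixup_e820(); they
+ * must be kept away from the boot allocator.
+ */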
+static unsigned long __initdata reserved_pages[2];
DEFINE_PER_CPU(unsigned int, vcpu_id);
    return rangeset_remove_range(mem, mfn_x(mfn), mfn_x(mfn));
}
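+/*
+ * Mark the page at the given pfn as RAM in the e820 map: try to add a
+ * new RAM range first, and failing that change the type of an existing
+ * (reserved) range covering the page.
+ */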
+static void __init mark_pfn_as_ram(struct e820map *e820, uint64_t pfn)
+{
+    if ( !e820_add_range(e820, pfn << PAGE_SHIFT,
+                         (pfn << PAGE_SHIFT) + PAGE_SIZE, E820_RAM) )
+        if ( !e820_change_range_type(e820, pfn << PAGE_SHIFT,
+                                     (pfn << PAGE_SHIFT) + PAGE_SIZE,
+                                     E820_RESERVED, E820_RAM) )
+            panic("Unable to add/change memory type of pfn %#lx to RAM", pfn);
+}
+
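+/*
+ * The pfns of the xenstore ring and, unless a PV console is in use, the
+ * console ring are obtained from HVM params.  Both pages are backed by
+ * RAM but may be absent from or marked reserved in the e820 map, so fix
+ * up their type and record their addresses in reserved_pages[].
+ */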
+void __init hypervisor_fixup_e820(struct e820map *e820)
+{
+    uint64_t pfn = 0;
+    unsigned int i = 0;
+    long rc;
+
+    ASSERT(xen_guest);
+
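+/*
+ * Fetch the pfn behind HVM parameter @p, mark it as RAM in the e820 map
+ * and record its address in reserved_pages[].
+ */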
+#define MARK_PARAM_RAM(p) ({                        \
+    rc = xen_hypercall_hvm_get_param(p, &pfn);      \
+    if ( rc )                                       \
+        panic("Unable to get " #p);                 \
+    mark_pfn_as_ram(e820, pfn);                     \
+    ASSERT(i < ARRAY_SIZE(reserved_pages));         \
+    reserved_pages[i++] = pfn << PAGE_SHIFT;        \
+})
+    MARK_PARAM_RAM(HVM_PARAM_STORE_PFN);
+    if ( !pv_console )
+        MARK_PARAM_RAM(HVM_PARAM_CONSOLE_PFN);
+#undef MARK_PARAM_RAM
+}
+
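+/*
+ * Return the array of page addresses set aside by hypervisor_fixup_e820()
+ * and store its size in @size, so callers can keep those pages out of the
+ * allocator.
+ */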
+const unsigned long *__init hypervisor_reserved_pages(unsigned int *size)
+{
+    ASSERT(xen_guest);
+
+    *size = ARRAY_SIZE(reserved_pages);
+
+    return reserved_pages;
+}
+
/*
 * Local variables:
 * mode: C
#include <asm/numa.h>
#include <asm/flushtlb.h>
#ifdef CONFIG_X86
+#include <asm/guest.h>
#include <asm/p2m.h>
#include <asm/setup.h> /* for highmem_start only */
#else
            badpage++;
        }
    }
+
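+    /*
+     * Pages reported by hypervisor_reserved_pages() are in use (e.g. by
+     * the xenstore and console rings) despite being marked as RAM: zap
+     * them from the bootmem regions so they are never handed out.
+     */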
+    if ( xen_guest )
+    {
+        badpage = hypervisor_reserved_pages(&array_size);
+        if ( badpage )
+        {
+            for ( i = 0; i < array_size; i++ )
+            {
+                bootmem_region_zap(*badpage >> PAGE_SHIFT,
+                                   (*badpage >> PAGE_SHIFT) + 1);
+                badpage++;
+            }
+        }
+    }
#endif
    /* Check new pages against the bad-page list. */
#ifdef CONFIG_XEN_GUEST
extern bool xen_guest;
+extern bool pv_console;
void probe_hypervisor(void);
void hypervisor_setup(void);
void hypervisor_ap_setup(void);
int hypervisor_alloc_unused_page(mfn_t *mfn);
int hypervisor_free_unused_page(mfn_t mfn);
+void hypervisor_fixup_e820(struct e820map *e820);
+const unsigned long *hypervisor_reserved_pages(unsigned int *size);
DECLARE_PER_CPU(unsigned int, vcpu_id);
DECLARE_PER_CPU(struct vcpu_info *, vcpu_info);
#else
#define xen_guest 0
+#define pv_console 0
static inline void probe_hypervisor(void) {};
static inline void hypervisor_setup(void)
{
    ASSERT_UNREACHABLE();
}
+static inline void hypervisor_fixup_e820(struct e820map *e820)
+{
+    ASSERT_UNREACHABLE();
+}
+
+static inline const unsigned long *hypervisor_reserved_pages(unsigned int *size)
+{
+    ASSERT_UNREACHABLE();
+    return NULL;
+}
+
#endif /* CONFIG_XEN_GUEST */
#endif /* __X86_GUEST_XEN_H__ */