machine_specific_memory_setup(raw);
- if ( xen_guest )
- hypervisor_fixup_e820(&e820);
+ if ( pv_shim )
+ pv_shim_fixup_e820(&e820);
printk("%s RAM map:\n", str);
print_e820_memory_map(e820.map, e820.nr_map);
static __read_mostly uint32_t xen_cpuid_base;
extern char hypercall_page[];
static struct rangeset *mem;
-static struct platform_bad_page __initdata reserved_pages[2];
DEFINE_PER_CPU(unsigned int, vcpu_id);
return rangeset_remove_range(mem, mfn_x(mfn), mfn_x(mfn));
}
-static void __init mark_pfn_as_ram(struct e820map *e820, uint64_t pfn)
-{
- if ( !e820_add_range(e820, pfn << PAGE_SHIFT,
- (pfn << PAGE_SHIFT) + PAGE_SIZE, E820_RAM) )
- if ( !e820_change_range_type(e820, pfn << PAGE_SHIFT,
- (pfn << PAGE_SHIFT) + PAGE_SIZE,
- E820_RESERVED, E820_RAM) )
- panic("Unable to add/change memory type of pfn %#lx to RAM\n", pfn);
-}
-
-void __init hypervisor_fixup_e820(struct e820map *e820)
-{
- uint64_t pfn = 0;
- unsigned int i = 0;
- long rc;
-
- ASSERT(xen_guest);
-
-#define MARK_PARAM_RAM(p) ({ \
- rc = xen_hypercall_hvm_get_param(p, &pfn); \
- if ( rc ) \
- panic("Unable to get " #p "\n"); \
- mark_pfn_as_ram(e820, pfn); \
- ASSERT(i < ARRAY_SIZE(reserved_pages)); \
- reserved_pages[i++].mfn = pfn; \
-})
- MARK_PARAM_RAM(HVM_PARAM_STORE_PFN);
- if ( !pv_console )
- MARK_PARAM_RAM(HVM_PARAM_CONSOLE_PFN);
-#undef MARK_PARAM_RAM
-}
-
-const struct platform_bad_page *__init hypervisor_reserved_pages(unsigned int *size)
-{
- ASSERT(xen_guest);
-
- *size = ARRAY_SIZE(reserved_pages);
-
- return reserved_pages;
-}
-
uint32_t hypervisor_cpuid_base(void)
{
return xen_cpuid_base;
static PAGE_LIST_HEAD(balloon);
static DEFINE_SPINLOCK(balloon_lock);
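+/*
+ * Pages (Xenstore and console rings) obtained from HVM params that the
+ * shim must keep reserved; filled by pv_shim_fixup_e820().
+ */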
+static struct platform_bad_page __initdata reserved_pages[2];
+
static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
static long pv_shim_grant_table_op(unsigned int cmd,
XEN_GUEST_HANDLE_PARAM(void) uop,
return shim_nrpages;
}
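+
+/*
+ * Mark the page at the given pfn as RAM in the e820 map: try to add a new
+ * E820_RAM range first, falling back to converting an existing
+ * E820_RESERVED range.
+ */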
+static void __init mark_pfn_as_ram(struct e820map *e820, uint64_t pfn)
+{
+ if ( !e820_add_range(e820, pfn << PAGE_SHIFT,
+ (pfn << PAGE_SHIFT) + PAGE_SIZE, E820_RAM) &&
+ !e820_change_range_type(e820, pfn << PAGE_SHIFT,
+ (pfn << PAGE_SHIFT) + PAGE_SIZE,
+ E820_RESERVED, E820_RAM) )
+ panic("Unable to add/change memory type of pfn %#lx to RAM\n", pfn);
+}
+
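+/*
+ * The Xenstore and console ring pages are handed to the shim via HVM
+ * params and are not covered by the RAM regions of the e820 map, so add
+ * them as RAM and record them for later reservation.
+ */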
+void __init pv_shim_fixup_e820(struct e820map *e820)
+{
+ uint64_t pfn = 0;
+ unsigned int i = 0;
+ long rc;
+
+ ASSERT(xen_guest);
+
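+/* Fetch the pfn behind an HVM param, mark it as RAM and record it. */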
+#define MARK_PARAM_RAM(p) ({ \
+ rc = xen_hypercall_hvm_get_param(p, &pfn); \
+ if ( rc ) \
+ panic("Unable to get " #p "\n"); \
+ mark_pfn_as_ram(e820, pfn); \
+ ASSERT(i < ARRAY_SIZE(reserved_pages)); \
+ reserved_pages[i++].mfn = pfn; \
+})
+ MARK_PARAM_RAM(HVM_PARAM_STORE_PFN);
+ if ( !pv_console )
+ MARK_PARAM_RAM(HVM_PARAM_CONSOLE_PFN);
+#undef MARK_PARAM_RAM
+}
+
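+/*
+ * Report the pages recorded by pv_shim_fixup_e820() so that they won't be
+ * handed out by the page allocator.
+ */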
+const struct platform_bad_page *__init pv_shim_reserved_pages(unsigned int *size)
+{
+ ASSERT(xen_guest);
+
+ *size = ARRAY_SIZE(reserved_pages);
+
+ return reserved_pages;
+}
+
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER| \
_PAGE_GUEST_KERNEL)
#define COMPAT_L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
}
}
- if ( xen_guest )
+ if ( pv_shim )
{
- badpage = hypervisor_reserved_pages(&array_size);
+ badpage = pv_shim_reserved_pages(&array_size);
if ( badpage )
{
for ( i = 0; i < array_size; i++ )
void hypervisor_ap_setup(void);
int hypervisor_alloc_unused_page(mfn_t *mfn);
int hypervisor_free_unused_page(mfn_t mfn);
-void hypervisor_fixup_e820(struct e820map *e820);
-const struct platform_bad_page *hypervisor_reserved_pages(unsigned int *size);
uint32_t hypervisor_cpuid_base(void);
void hypervisor_resume(void);
ASSERT_UNREACHABLE();
}
-static inline void hypervisor_fixup_e820(struct e820map *e820)
-{
- ASSERT_UNREACHABLE();
-}
-
-static inline const struct platform_bad_page *hypervisor_reserved_pages(unsigned int *size)
-{
- ASSERT_UNREACHABLE();
- return NULL;
-}
-
#endif /* CONFIG_XEN_GUEST */
#endif /* __X86_GUEST_XEN_H__ */
void pv_shim_offline_memory(unsigned int nr, unsigned int order);
domid_t get_initial_domain_id(void);
uint64_t pv_shim_mem(uint64_t avail);
+void pv_shim_fixup_e820(struct e820map *e820);
+const struct platform_bad_page *pv_shim_reserved_pages(unsigned int *size);
#else
ASSERT_UNREACHABLE();
return 0;
}
+
+static inline void pv_shim_fixup_e820(struct e820map *e820)
+{
+    ASSERT_UNREACHABLE();
+}
+
+static inline const struct platform_bad_page *
+pv_shim_reserved_pages(unsigned int *size)
+{
+    ASSERT_UNREACHABLE();
+    return NULL;
+}
#endif