xenbits.xensource.com Git - people/sstabellini/xen-unstable.git/.git/commitdiff
x86/shim: only mark special pages as RAM in pvshim mode
authorRoger Pau Monné <roger.pau@citrix.com>
Tue, 8 Jan 2019 09:03:45 +0000 (10:03 +0100)
committerJan Beulich <jbeulich@suse.com>
Tue, 8 Jan 2019 09:03:45 +0000 (10:03 +0100)
When running Xen as a guest it's not necessary to mark such pages as
RAM because they won't be assigned to the initial domain memory map.

While there, move the functions to the PV shim specific file and rename
them accordingly.

No functional change expected.

Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
xen/arch/x86/e820.c
xen/arch/x86/guest/xen.c
xen/arch/x86/pv/shim.c
xen/common/page_alloc.c
xen/include/asm-x86/guest/xen.h
xen/include/asm-x86/pv/shim.h

index 590ea985ef082e1101120b37817e12be473a14d1..0c67ccd262d0b8e0c5e4880a7ff3c92552eb87e7 100644 (file)
@@ -700,8 +700,8 @@ unsigned long __init init_e820(const char *str, struct e820map *raw)
 
     machine_specific_memory_setup(raw);
 
-    if ( xen_guest )
-        hypervisor_fixup_e820(&e820);
+    if ( pv_shim )
+        pv_shim_fixup_e820(&e820);
 
     printk("%s RAM map:\n", str);
     print_e820_memory_map(e820.map, e820.nr_map);
index 8cee880adc659185ddc6db7b6a9904bdc94ad817..7b7a5badabb29d4de15dbc488a337e78b7fd15d8 100644 (file)
@@ -40,7 +40,6 @@ bool __read_mostly xen_guest;
 static __read_mostly uint32_t xen_cpuid_base;
 extern char hypercall_page[];
 static struct rangeset *mem;
-static struct platform_bad_page __initdata reserved_pages[2];
 
 DEFINE_PER_CPU(unsigned int, vcpu_id);
 
@@ -302,47 +301,6 @@ int hypervisor_free_unused_page(mfn_t mfn)
     return rangeset_remove_range(mem, mfn_x(mfn), mfn_x(mfn));
 }
 
-static void __init mark_pfn_as_ram(struct e820map *e820, uint64_t pfn)
-{
-    if ( !e820_add_range(e820, pfn << PAGE_SHIFT,
-                         (pfn << PAGE_SHIFT) + PAGE_SIZE, E820_RAM) )
-        if ( !e820_change_range_type(e820, pfn << PAGE_SHIFT,
-                                     (pfn << PAGE_SHIFT) + PAGE_SIZE,
-                                     E820_RESERVED, E820_RAM) )
-            panic("Unable to add/change memory type of pfn %#lx to RAM\n", pfn);
-}
-
-void __init hypervisor_fixup_e820(struct e820map *e820)
-{
-    uint64_t pfn = 0;
-    unsigned int i = 0;
-    long rc;
-
-    ASSERT(xen_guest);
-
-#define MARK_PARAM_RAM(p) ({                    \
-    rc = xen_hypercall_hvm_get_param(p, &pfn);  \
-    if ( rc )                                   \
-        panic("Unable to get " #p "\n");        \
-    mark_pfn_as_ram(e820, pfn);                 \
-    ASSERT(i < ARRAY_SIZE(reserved_pages));     \
-    reserved_pages[i++].mfn = pfn;              \
-})
-    MARK_PARAM_RAM(HVM_PARAM_STORE_PFN);
-    if ( !pv_console )
-        MARK_PARAM_RAM(HVM_PARAM_CONSOLE_PFN);
-#undef MARK_PARAM_RAM
-}
-
-const struct platform_bad_page *__init hypervisor_reserved_pages(unsigned int *size)
-{
-    ASSERT(xen_guest);
-
-    *size = ARRAY_SIZE(reserved_pages);
-
-    return reserved_pages;
-}
-
 uint32_t hypervisor_cpuid_base(void)
 {
     return xen_cpuid_base;
index cdc72f787df058c887637d1b09b572b467dd72aa..636a9d6a10daefea1c7ac73ed7b20ec548903312 100644 (file)
@@ -54,6 +54,8 @@ static DEFINE_SPINLOCK(grant_lock);
 static PAGE_LIST_HEAD(balloon);
 static DEFINE_SPINLOCK(balloon_lock);
 
+static struct platform_bad_page __initdata reserved_pages[2];
+
 static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
 static long pv_shim_grant_table_op(unsigned int cmd,
                                    XEN_GUEST_HANDLE_PARAM(void) uop,
@@ -113,6 +115,47 @@ uint64_t pv_shim_mem(uint64_t avail)
     return shim_nrpages;
 }
 
+static void __init mark_pfn_as_ram(struct e820map *e820, uint64_t pfn)
+{
+    if ( !e820_add_range(e820, pfn << PAGE_SHIFT,
+                         (pfn << PAGE_SHIFT) + PAGE_SIZE, E820_RAM) &&
+         !e820_change_range_type(e820, pfn << PAGE_SHIFT,
+                                 (pfn << PAGE_SHIFT) + PAGE_SIZE,
+                                 E820_RESERVED, E820_RAM) )
+        panic("Unable to add/change memory type of pfn %#lx to RAM\n", pfn);
+}
+
+void __init pv_shim_fixup_e820(struct e820map *e820)
+{
+    uint64_t pfn = 0;
+    unsigned int i = 0;
+    long rc;
+
+    ASSERT(xen_guest);
+
+#define MARK_PARAM_RAM(p) ({                    \
+    rc = xen_hypercall_hvm_get_param(p, &pfn);  \
+    if ( rc )                                   \
+        panic("Unable to get " #p "\n");        \
+    mark_pfn_as_ram(e820, pfn);                 \
+    ASSERT(i < ARRAY_SIZE(reserved_pages));     \
+    reserved_pages[i++].mfn = pfn;              \
+})
+    MARK_PARAM_RAM(HVM_PARAM_STORE_PFN);
+    if ( !pv_console )
+        MARK_PARAM_RAM(HVM_PARAM_CONSOLE_PFN);
+#undef MARK_PARAM_RAM
+}
+
+const struct platform_bad_page *__init pv_shim_reserved_pages(unsigned int *size)
+{
+    ASSERT(xen_guest);
+
+    *size = ARRAY_SIZE(reserved_pages);
+
+    return reserved_pages;
+}
+
 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER| \
                  _PAGE_GUEST_KERNEL)
 #define COMPAT_L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
index e591601f9ce8338d27a9855eb8eb971e534eafbb..37a52aaa0db2703fcdd944e0aea7870cd6b43cc1 100644 (file)
@@ -348,9 +348,9 @@ void __init init_boot_pages(paddr_t ps, paddr_t pe)
         }
     }
 
-    if ( xen_guest )
+    if ( pv_shim )
     {
-        badpage = hypervisor_reserved_pages(&array_size);
+        badpage = pv_shim_reserved_pages(&array_size);
         if ( badpage )
         {
             for ( i = 0; i < array_size; i++ )
index 6f15e24b6b513acd08661638c69776f1e1476e7f..7e04e4a7ab9ba5b767fb6afbbaa73670876c6c95 100644 (file)
@@ -36,8 +36,6 @@ void hypervisor_setup(void);
 void hypervisor_ap_setup(void);
 int hypervisor_alloc_unused_page(mfn_t *mfn);
 int hypervisor_free_unused_page(mfn_t mfn);
-void hypervisor_fixup_e820(struct e820map *e820);
-const struct platform_bad_page *hypervisor_reserved_pages(unsigned int *size);
 uint32_t hypervisor_cpuid_base(void);
 void hypervisor_resume(void);
 
@@ -60,17 +58,6 @@ static inline void hypervisor_ap_setup(void)
     ASSERT_UNREACHABLE();
 }
 
-static inline void hypervisor_fixup_e820(struct e820map *e820)
-{
-    ASSERT_UNREACHABLE();
-}
-
-static inline const struct platform_bad_page *hypervisor_reserved_pages(unsigned int *size)
-{
-    ASSERT_UNREACHABLE();
-    return NULL;
-}
-
 #endif /* CONFIG_XEN_GUEST */
 #endif /* __X86_GUEST_XEN_H__ */
 
index fb739772df158ab070381ed26ffb01cbe7dfb445..8a91f4f9dfbfe12f3223af17f15a719a1de43677 100644 (file)
@@ -43,6 +43,8 @@ void pv_shim_online_memory(unsigned int nr, unsigned int order);
 void pv_shim_offline_memory(unsigned int nr, unsigned int order);
 domid_t get_initial_domain_id(void);
 uint64_t pv_shim_mem(uint64_t avail);
+void pv_shim_fixup_e820(struct e820map *e820);
+const struct platform_bad_page *pv_shim_reserved_pages(unsigned int *size);
 
 #else
 
@@ -91,6 +93,16 @@ static inline uint64_t pv_shim_mem(uint64_t avail)
     ASSERT_UNREACHABLE();
     return 0;
 }
+static inline void pv_shim_fixup_e820(struct e820map *e820)
+{
+    ASSERT_UNREACHABLE();
+}
+static inline const struct platform_bad_page *
+pv_shim_reserved_pages(unsigned int *s)
+{
+    ASSERT_UNREACHABLE();
+    return NULL;
+}
 
 #endif