EXPORT_SYMBOL(HYPERVISOR_shared_info);
unsigned long *phys_to_machine_mapping;
-unsigned long *pfn_to_mfn_frame_list_list, **pfn_to_mfn_frame_list;
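+ /* Fixed-size frame list: 16 pages cover up to 64GB of guest memory (4K pages, 32-bit longs). */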
+unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[16];
EXPORT_SYMBOL(phys_to_machine_mapping);
/* Raw start-of-day parameters from the hypervisor. */
}
}
-#ifndef CONFIG_XEN
else if (!memcmp(from, "memmap=", 7)) {
if (to != command_line)
to--;
}
}
}
-#endif
else if (!memcmp(from, "noexec=", 7))
noexec_setup(from + 7);
int i, j, k, fpp;
struct physdev_set_iopl set_iopl;
unsigned long max_low_pfn;
- unsigned long p2m_pages, size;
+ unsigned long p2m_pages;
/* Force a quick death if the kernel panics (not domain 0). */
extern int panic_timeout;
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
phys_to_machine_mapping = alloc_bootmem_low_pages(
max_pfn * sizeof(unsigned long));
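+ /* Mark the whole p2m table invalid (~0) before copying in the initial MFNs below. */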
+ memset(phys_to_machine_mapping, ~0,
+ max_pfn * sizeof(unsigned long));
memcpy(phys_to_machine_mapping,
(unsigned long *)xen_start_info->mfn_list,
p2m_pages * sizeof(unsigned long));
- memset(phys_to_machine_mapping + p2m_pages, ~0,
- (max_pfn - p2m_pages) * sizeof(unsigned long));
free_bootmem(
__pa(xen_start_info->mfn_list),
PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
* Initialise the list of the frames that specify the list of
* frames that make up the p2m table. Used by save/restore
*/
- fpp = PAGE_SIZE/sizeof(unsigned long);
- size = (max_pfn + fpp - 1) / fpp;
- size = (size + fpp - 1) / fpp;
- size *= sizeof(unsigned long);
- BUG_ON(size > PAGE_SIZE);
- pfn_to_mfn_frame_list_list = alloc_bootmem_low_pages(size);
- pfn_to_mfn_frame_list = alloc_bootmem_low(size);
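+ /* One frame-list-list page is enough to reference every frame-list page allocated below. */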
+ pfn_to_mfn_frame_list_list = alloc_bootmem_low_pages(PAGE_SIZE);
+ fpp = PAGE_SIZE/sizeof(unsigned long);
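+ /* Each frame-list page indexes fpp p2m frames, i.e. fpp*fpp PFNs. */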
for (i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++) {
if ((j % fpp) == 0) {
k++;
- BUG_ON(k * sizeof(unsigned long) >= size);
+ BUG_ON(k >= 16);
pfn_to_mfn_frame_list[k] =
alloc_bootmem_low_pages(PAGE_SIZE);
pfn_to_mfn_frame_list_list[k] =
#include <linux/string.h>
#include <linux/kexec.h>
#include <linux/module.h>
-#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/page.h>
i = e820.nr_map-1;
current_end = e820.map[i].addr + e820.map[i].size;
- /*
- * A little less than 2% of available memory are needed for page
- * tables, p2m map, and mem_map. Hence the maximum amount of memory
- * we can potentially balloon up to cannot exceed about 50 times
- * what we've been given initially.
- */
-#define PAGE_OVERHEAD (PAGE_SIZE \
- / (sizeof(pte_t) + sizeof(long) + sizeof(struct page)))
- if (end_user_pfn / (PAGE_OVERHEAD - 1) > xen_start_info->nr_pages) {
- end = (xen_start_info->nr_pages * (PAGE_OVERHEAD - 1))
- << PAGE_SHIFT;
- printk(KERN_WARNING "mem=%lu is invalid for an initial"
- " allocation of %lu, using %lu\n",
- end_user_pfn << PAGE_SHIFT,
- xen_start_info->nr_pages << PAGE_SHIFT, end);
- end_user_pfn = end >> PAGE_SHIFT;
- }
-
if (current_end < end) {
/*
* The e820 map ends before our requested size so
}
}
-#ifndef CONFIG_XEN
void __init parse_memmapopt(char *p, char **from)
{
unsigned long long start_at, mem_size;
}
p = *from;
}
-#endif
unsigned long pci_mem_start = 0xaeedbabe;
EXPORT_SYMBOL(pci_mem_start);
};
unsigned long *phys_to_machine_mapping;
-unsigned long *pfn_to_mfn_frame_list_list, **pfn_to_mfn_frame_list;
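+ /* Fixed-size frame list: 512 pages cover up to 512GB of guest memory (4K pages, 64-bit longs). */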
+unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
EXPORT_SYMBOL(phys_to_machine_mapping);
if (!memcmp(from, "mem=", 4))
parse_memopt(from+4, &from);
-#ifndef CONFIG_XEN
if (!memcmp(from, "memmap=", 7)) {
/* exactmap option is for user defined memory */
if (!memcmp(from+7, "exactmap", 8)) {
userdef = 1;
}
}
-#endif
#ifdef CONFIG_NUMA
if (!memcmp(from, "numa=", 5))
#ifdef CONFIG_XEN
{
int i, j, k, fpp;
- unsigned long p2m_pages, size;
+ unsigned long p2m_pages;
p2m_pages = end_pfn;
if (xen_start_info->nr_pages > end_pfn) {
/* Make sure we have a large enough P->M table. */
phys_to_machine_mapping = alloc_bootmem_pages(
end_pfn * sizeof(unsigned long));
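+ /* Mark the whole p2m table invalid (~0) before copying in the initial MFNs below. */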
+ memset(phys_to_machine_mapping, ~0,
+ end_pfn * sizeof(unsigned long));
memcpy(phys_to_machine_mapping,
(unsigned long *)xen_start_info->mfn_list,
p2m_pages * sizeof(unsigned long));
- memset(phys_to_machine_mapping + p2m_pages, ~0,
- (end_pfn - p2m_pages) * sizeof(unsigned long));
free_bootmem(
__pa(xen_start_info->mfn_list),
PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
* list of frames that make up the p2m table. Used by
* save/restore.
*/
- fpp = PAGE_SIZE/sizeof(unsigned long);
- size = (max_pfn + fpp - 1) / fpp;
- size = (size + fpp - 1) / fpp;
- size *= sizeof(unsigned long);
- pfn_to_mfn_frame_list_list = alloc_bootmem_pages(size);
- if (size > PAGE_SIZE
- && xen_create_contiguous_region((unsigned long)
- pfn_to_mfn_frame_list_list,
- get_order(size), 0))
- BUG();
- pfn_to_mfn_frame_list = alloc_bootmem(size);
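+ /* One frame-list-list page is enough to reference every frame-list page allocated below. */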
+ pfn_to_mfn_frame_list_list = alloc_bootmem_pages(PAGE_SIZE);
+ fpp = PAGE_SIZE/sizeof(unsigned long);
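+ /* Each frame-list page indexes fpp p2m frames, i.e. fpp*fpp PFNs. */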
for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {
if ((j % fpp) == 0) {
k++;
- BUG_ON(k * sizeof(unsigned long) >= size);
+ BUG_ON(k >= fpp);
pfn_to_mfn_frame_list[k] =
alloc_bootmem_pages(PAGE_SIZE);
pfn_to_mfn_frame_list_list[k] =
while (va < (__START_KERNEL_map
+ (start_pfn << PAGE_SHIFT)
+ tables_space)) {
- if (!pmd_index(va) && !pte_index(va)) {
- page = (unsigned long *)init_level4_pgt;
- addr = page[pgd_index(va)];
- addr_to_page(addr, page);
- addr = page[pud_index(va)];
- addr_to_page(addr, page);
- }
pmd = (pmd_t *)&page[pmd_index(va)];
if (pmd_none(*pmd)) {
pte_page = alloc_static_page(&phys);
unsigned long shinfo_mfn;
extern unsigned long max_pfn;
extern unsigned long *pfn_to_mfn_frame_list_list;
- extern unsigned long **pfn_to_mfn_frame_list;
+ extern unsigned long *pfn_to_mfn_frame_list[];
if (suspend_cancelled) {
xen_start_info->store_mfn =
#define FIRST_USER_ADDRESS 0
#ifndef __ASSEMBLY__
-#define MAXMEM 0xdfffffffffUL
+#define MAXMEM 0x3fffffffffffUL
#define VMALLOC_START 0xffffc20000000000UL
#define VMALLOC_END 0xffffe1ffffffffffUL
#define MODULES_VADDR 0xffffffff88000000UL
-#define MODULES_END 0xffffffffff000000UL
+#define MODULES_END 0xfffffffffff00000UL
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
#define _PAGE_BIT_PRESENT 0