#ifndef __ASSEMBLY__
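+/*
+ * Which phase of boot-time memory management we are in, i.e. which
+ * page allocator is currently usable:
+ *   BEFORE_BOOTMEM: bootmem is not up yet; early callers use the
+ *                   e820-backed alloc_low_page().
+ *   DURING_BOOTMEM: bootmem is initialized; use alloc_bootmem_*().
+ *   AFTER_BOOTMEM:  memory has been handed to the buddy allocator;
+ *                   use get_zeroed_page() and friends.
+ */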
+enum bootmem_state {
+ BEFORE_BOOTMEM,
+ DURING_BOOTMEM,
+ AFTER_BOOTMEM
+};
+
+extern enum bootmem_state bootmem_state;
+
extern int page_is_ram(unsigned long pagenr);
extern int devmem_is_allowed(unsigned long pagenr);
#endif
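The call sites converted below all repeat the same three-way dispatch on
bootmem_state. A minimal sketch of that pattern, assuming the 32-bit
alloc_low_page() signature; the helper name bootmem_alloc_page() is
hypothetical and not part of this patch:

static __ref void *bootmem_alloc_page(void)
{
	switch (bootmem_state) {
	case BEFORE_BOOTMEM:
		/* early boot: pages come from the e820-reserved range */
		return alloc_low_page();
	case DURING_BOOTMEM:
		return alloc_bootmem_low_pages(PAGE_SIZE);
	case AFTER_BOOTMEM:
		/* buddy allocator is live */
		return (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
	}
	return NULL;
}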
initmem_init(0, max_pfn);
+ bootmem_state = DURING_BOOTMEM;
#ifdef CONFIG_ACPI_SLEEP
/*
unsigned long __meminitdata e820_table_end;
unsigned long __meminitdata e820_table_top;
-int after_bootmem;
+enum bootmem_state bootmem_state = BEFORE_BOOTMEM;
int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
- if (!after_bootmem)
+ if (bootmem_state == BEFORE_BOOTMEM)
init_gbpages();
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
* memory mapped. Unfortunately this is done currently before the
* nodes are discovered.
*/
- if (!after_bootmem)
+ if (bootmem_state == BEFORE_BOOTMEM)
find_early_table_space(end, use_pse, use_gbpages);
#ifdef CONFIG_X86_32
#endif
#ifdef CONFIG_X86_64
- if (!after_bootmem)
+ if (bootmem_state == BEFORE_BOOTMEM)
mmu_cr4_features = read_cr4();
#endif
__flush_tlb_all();
- if (!after_bootmem && e820_table_end > e820_table_start)
+ if (bootmem_state == BEFORE_BOOTMEM &&
+ e820_table_end > e820_table_start)
reserve_early(e820_table_start << PAGE_SHIFT,
e820_table_end << PAGE_SHIFT, "PGTABLE");
- if (!after_bootmem)
+ if (bootmem_state == BEFORE_BOOTMEM)
early_memtest(start, end);
return ret >> PAGE_SHIFT;
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
pud_t *pud;
- pmd_t *pmd_table;
+ pmd_t *pmd_table = NULL;
#ifdef CONFIG_X86_PAE
if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
- if (after_bootmem)
+ switch (bootmem_state) {
+ case DURING_BOOTMEM:
pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
- else
+ break;
+ case BEFORE_BOOTMEM:
pmd_table = (pmd_t *)alloc_low_page();
+ break;
+ default:
+ panic("after bootmem call one_md_table_init\n");
+ }
paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
pud = pud_offset(pgd, 0);
if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
pte_t *page_table = NULL;
- if (after_bootmem) {
+ switch (bootmem_state) {
+ case DURING_BOOTMEM:
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
if (!page_table)
page_table =
(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
- } else
+ break;
+ case BEFORE_BOOTMEM:
page_table = (pte_t *)alloc_low_page();
+ break;
+ default:
+ panic("after bootmem call one_page_table_init\n");
+ }
paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
pte_t *newpte;
int i;
- BUG_ON(after_bootmem);
+ BUG_ON(bootmem_state != BEFORE_BOOTMEM);
newpte = alloc_low_page();
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(newpte + i, pte[i]);
bootmap = setup_node_bootmem(nodeid, start_pfn, end_pfn,
bootmap);
}
-
- after_bootmem = 1;
}
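With the flag gone, the state transitions live in three places rather
than one. A sketch of the sequence as this patch arranges it (the first
two locations are visible in the hunks above; the last is in mem_init()
below):

	/* arch/x86/mm/init.c: static initializer */
	enum bootmem_state bootmem_state = BEFORE_BOOTMEM;

	/* setup path, right after initmem_init(0, max_pfn) */
	bootmem_state = DURING_BOOTMEM;

	/* mem_init(), once free_all_bootmem() has given everything
	 * to the buddy allocator */
	bootmem_state = AFTER_BOOTMEM;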
/*
/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem();
+ bootmem_state = AFTER_BOOTMEM;
+
reservedpages = 0;
for (tmp = 0; tmp < max_low_pfn; tmp++)
/*
/*
* NOTE: This function is marked __ref because it calls __init function
- * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
+ * (alloc_bootmem_pages). It's safe ONLY while bootmem_state == DURING_BOOTMEM.
*/
static __ref void *spp_getpage(void)
{
- void *ptr;
+ void *ptr = NULL;
- if (after_bootmem)
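+ /*
+ * BEFORE_BOOTMEM would be a bug here: neither allocator
+ * below is usable that early.
+ */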
+ switch (bootmem_state) {
+ case AFTER_BOOTMEM:
ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
- else
+ break;
+ case DURING_BOOTMEM:
ptr = alloc_bootmem_pages(PAGE_SIZE);
+ break;
+ default:
+ panic("calling spp_getpage before bootmem\n");
+ }
if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
panic("set_pte_phys: cannot allocate page data %s\n",
- after_bootmem ? "after bootmem" : "");
+ bootmem_state == AFTER_BOOTMEM ? "after bootmem" : "");
}
pr_debug("spp_getpage %p\n", ptr);
static __ref void *alloc_low_page(unsigned long *phys)
{
- unsigned long pfn = e820_table_end++;
+ unsigned long pfn;
void *adr;
- if (after_bootmem) {
+ if (bootmem_state == AFTER_BOOTMEM) {
adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
*phys = __pa(adr);
return adr;
}
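+ /* Don't consume an early pfn on the AFTER_BOOTMEM path above. */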
+ pfn = e820_table_end++;
if (pfn >= e820_table_top)
panic("alloc_low_page: ran out of memory");
static __ref void unmap_low_page(void *adr)
{
- if (after_bootmem)
+ if (bootmem_state == AFTER_BOOTMEM)
return;
early_iounmap(adr, PAGE_SIZE);
for(i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {
if (addr >= end) {
- if (!after_bootmem) {
+ if (bootmem_state != AFTER_BOOTMEM) {
for(; i < PTRS_PER_PTE; i++, pte++)
set_pte(pte, __pte(0));
}
pgprot_t new_prot = prot;
if (address >= end) {
- if (!after_bootmem) {
+ if (bootmem_state != AFTER_BOOTMEM) {
for (; i < PTRS_PER_PMD; i++, pmd++)
set_pmd(pmd, __pmd(0));
}
if (addr >= end)
break;
- if (!after_bootmem &&
+ if (bootmem_state != AFTER_BOOTMEM &&
!e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
set_pud(pud, __pud(0));
continue;
/* clear_bss() already clear the empty_zero_page */
- reservedpages = 0;
-
/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
totalram_pages = numa_free_all_bootmem();
totalram_pages = free_all_bootmem();
#endif
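+ /* All allocations go through the buddy allocator from here on. */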
+ bootmem_state = AFTER_BOOTMEM;
absent_pages = absent_pages_in_range(0, max_pfn);
reservedpages = max_pfn - totalram_pages - absent_pages;
- after_bootmem = 1;
codesize = (unsigned long) &_etext - (unsigned long) &_text;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;