ia64/linux-2.6.18-xen.hg

view include/asm-sh64/pgalloc.h @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain, because they are initialised in
machine_kexec_setup_resources() using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 10:55:18 2008 +0000 (2008-02-28)
parents 831230e53067
#ifndef __ASM_SH64_PGALLOC_H
#define __ASM_SH64_PGALLOC_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/pgalloc.h
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003, 2004 Paul Mundt
 * Copyright (C) 2003, 2004 Richard Curnow
 *
 */

#include <linux/threads.h>
#include <linux/mm.h>
/*
 * Per-CPU quicklists of recently freed page table pages; each cached
 * page links to the next free one through its first word.
 */
#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist (current_cpu_data.pmd_quick)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
static inline void pgd_init(unsigned long page)
{
	unsigned long *pgd = (unsigned long *)page;
	extern pte_t empty_bad_pte_table[PTRS_PER_PTE];
	int i;

	for (i = 0; i < USER_PTRS_PER_PGD; i++)
		pgd[i] = (unsigned long)empty_bad_pte_table;
}

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

static inline pgd_t *get_pgd_slow(void)
{
	unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
	pgd_t *ret = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);
	return ret;
}

static inline pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();

	if (ret) {
		memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
	}
	return (pgd_t *)ret;
}

static inline void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

static inline void free_pgd_slow(pgd_t *pgd)
{
	kfree((void *)pgd);
}

extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);

static inline pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

static inline void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

static inline void pte_free_kernel(pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct page *pte)
{
	__free_page(pte);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	pte_t *pte;

	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);

	return pte;
}

static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);

	return pte;
}

#define __pte_free_tlb(tlb, pte)	tlb_remove_page((tlb), (pte))

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */

#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)

#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()
#define __pte_free_tlb(tlb, pte)	tlb_remove_page((tlb), (pte))
#define __pmd_free_tlb(tlb, pmd)	do { } while (0)

#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)

static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd;
	pmd = (pmd_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
	return pmd;
}

static __inline__ void pmd_free(pmd_t *pmd)
{
	free_page((unsigned long) pmd);
}

#define pgd_populate(mm, pgd, pmd)	pgd_set(pgd, pmd)
#define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)

#else
#error "No defined page table size"
#endif

#define check_pgt_cache()	do { } while (0)
#define pgd_free(pgd)		free_pgd_slow(pgd)
#define pgd_alloc(mm)		get_pgd_fast()

extern int do_check_pgt_cache(int, int);

#define pmd_populate_kernel(mm, pmd, pte) \
	set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) (pte)))

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				struct page *pte)
{
	set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) page_address(pte)));
}

#endif /* __ASM_SH64_PGALLOC_H */
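
For context, a minimal sketch of how the hooks above are meant to be combined by a caller: allocate a pte page with pte_alloc_one() and wire it into the pmd with pmd_populate(). The function name example_alloc_pte() is hypothetical, and the locking and page-table accounting that the real generic mm code performs are omitted; this sketch is not part of the header.

static int example_alloc_pte(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long address)
{
	/* Get a zeroed pte page from the allocator defined above. */
	struct page *new = pte_alloc_one(mm, address);

	if (!new)
		return -ENOMEM;

	if (pmd_present(*pmd))
		pte_free(new);			/* someone else already populated the pmd */
	else
		pmd_populate(mm, pmd, new);	/* point the pmd at the new pte page */

	return 0;
}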