ia64/xen-unstable

xen/include/asm-ia64/linux-xen/asm/pgalloc.h @ 10673:80e04aa530b8

[IA64] Fix LOAD_PHYSCAL macro

This fix is a workaround; LOAD_PHYSCAL is currently used only by the
MCA/INIT handler.

Signed-off-by: Akio Takebe <takebe_akio@jp.fujitsu.com>
author   awilliam@xenbuild.aw
date     Thu Jul 06 10:04:57 2006 -0600
parents  bc76ad9d6270
children 439051df45f3

#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H

/*
 * This file contains the functions and defines necessary to allocate
 * page tables.
 *
 * This hopefully works with any (fixed) ia-64 page-size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
 */

#include <linux/config.h>

#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/threads.h>

#include <asm/mmu_context.h>
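
/*
 * Non-Xen (native Linux) build: page-table pages come from a per-CPU
 * free list ("quicklist") of zeroed pages, so most allocations avoid a
 * round trip through the page allocator.  The list is threaded through
 * the first word of each free page.
 */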
#ifndef XEN
DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
DECLARE_PER_CPU(long, __pgtable_quicklist_size);
#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)

static inline long pgtable_quicklist_total_size(void)
{
	long ql_size = 0;
	int cpuid;

	for_each_online_cpu(cpuid) {
		ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
	}
	return ql_size;
}
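
/*
 * Pop a page off this CPU's quicklist, clearing the link word so the
 * page comes back fully zeroed; fall back to the page allocator with
 * __GFP_ZERO when the list is empty.  Preemption is disabled while the
 * per-CPU list is manipulated.
 */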
static inline void *pgtable_quicklist_alloc(void)
{
	unsigned long *ret = NULL;

	preempt_disable();

	ret = pgtable_quicklist;
	if (likely(ret != NULL)) {
		pgtable_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		--pgtable_quicklist_size;
		preempt_enable();
	} else {
		preempt_enable();
		ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	}

	return ret;
}
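
/*
 * Return a page to the current CPU's quicklist.  Under CONFIG_NUMA a
 * page belonging to a remote node is handed straight back to the page
 * allocator instead, which keeps each quicklist node-local.
 */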
static inline void pgtable_quicklist_free(void *pgtable_entry)
{
#ifdef CONFIG_NUMA
	unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));

	if (unlikely(nid != numa_node_id())) {
		free_page((unsigned long)pgtable_entry);
		return;
	}
#endif

	preempt_disable();
	*(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
	pgtable_quicklist = (unsigned long *)pgtable_entry;
	++pgtable_quicklist_size;
	preempt_enable();
}
#endif
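
/*
 * Xen may populate a domain's page tables concurrently on several
 * CPUs, so new levels are installed with compare-and-exchange helpers
 * instead of plain stores.  With the PUD level folded into the PGD
 * there is never a PGD entry to install, so pgd_cmpxchg_rel
 * degenerates to "always succeeds".
 */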
#ifdef XEN
#include <asm/pgtable.h>
#ifdef __PAGETABLE_PUD_FOLDED
# define pgd_cmpxchg_rel(mm, pgd, old_pud, new_pud)	({(void)old_pud;1;})
#else
# error "implement pgd_cmpxchg_rel()!"
#endif
#endif

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return pgtable_quicklist_alloc();
}

static inline void pgd_free(pgd_t * pgd)
{
	pgtable_quicklist_free(pgd);
}

static inline void
pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
{
	pud_val(*pud_entry) = __pa(pmd);
}
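
/*
 * Atomically install a new pmd page in *pud_entry, but only if the
 * entry still holds old_pmd.  cmpxchg_rel has release semantics, so
 * the (zeroed) contents of the new pmd page are visible to other CPUs
 * before the entry itself is.  Returns 1 on success, 0 if another CPU
 * got there first.
 */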
#ifdef XEN
static inline int
pud_cmpxchg_rel(struct mm_struct *mm, pud_t * pud_entry,
		pmd_t * old_pmd, pmd_t * new_pmd)
{
#ifdef CONFIG_SMP
	unsigned long r;
	r = cmpxchg_rel(&pud_val(*pud_entry), __pa(old_pmd), __pa(new_pmd));
	return (r == __pa(old_pmd));
#else
	if (pud_val(*pud_entry) == __pa(old_pmd)) {
		pud_val(*pud_entry) = __pa(new_pmd);
		return 1;
	}
	return 0;
#endif
}
#endif
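
/*
 * Every level of the tree occupies exactly one page, so the pgd, pmd,
 * and pte allocators are all thin wrappers around the same quicklist
 * pair; when the native per-CPU implementation above is compiled out
 * under XEN, Xen supplies its own pgtable_quicklist_alloc()/free()
 * elsewhere.
 */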
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

static inline void pmd_free(pmd_t * pmd)
{
	pgtable_quicklist_free(pmd);
}

#define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)

#ifndef XEN
static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
{
	pmd_val(*pmd_entry) = page_to_maddr(pte);
}
#endif

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}
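
/*
 * Xen counterpart of pmd_populate_kernel: install new_pte only if the
 * pmd still holds old_pte, using a release-semantics cmpxchg on SMP.
 * Returns 1 on success, 0 if the entry changed underneath us.
 */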
#ifdef XEN
static inline int
pmd_cmpxchg_kernel_rel(struct mm_struct *mm, pmd_t * pmd_entry,
		       pte_t * old_pte, pte_t * new_pte)
{
#ifdef CONFIG_SMP
	unsigned long r;
	r = cmpxchg_rel(&pmd_val(*pmd_entry), __pa(old_pte), __pa(new_pte));
	return (r == __pa(old_pte));
#else
	if (pmd_val(*pmd_entry) == __pa(old_pte)) {
		pmd_val(*pmd_entry) = __pa(new_pte);
		return 1;
	}
	return 0;
#endif
}
#endif
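
/*
 * A hypothetical caller pattern (not part of this header): CPUs racing
 * to fill the same empty pmd each allocate a pte page, try to install
 * it, and the loser frees its copy.  This assumes an empty entry
 * compares equal to __pa(NULL):
 *
 *	pte_t *pte = pte_alloc_one_kernel(mm, addr);
 *	if (!pmd_cmpxchg_kernel_rel(mm, pmd_entry, NULL, pte))
 *		pte_free_kernel(pte);
 */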
#ifndef XEN
static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long addr)
{
	return virt_to_page(pgtable_quicklist_alloc());
}
#endif

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

#ifndef XEN
static inline void pte_free(struct page *pte)
{
	pgtable_quicklist_free(page_address(pte));
}
#endif

static inline void pte_free_kernel(pte_t * pte)
{
	pgtable_quicklist_free(pte);
}

#ifndef XEN
#define __pte_free_tlb(tlb, pte)	pte_free(pte)
#endif
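
/*
 * Trims a CPU's page-table quicklist when it grows past its high-water
 * mark; the implementation lives in arch code, not in this header.
 */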
extern void check_pgt_cache(void);

#endif /* _ASM_IA64_PGALLOC_H */