ia64/xen-unstable

xen/include/asm-ia64/linux-xen/asm/pgalloc.h @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use d->max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 439051df45f3
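
As an illustrative sketch of the conversion this changeset describes (not
code taken from the patch itself): loops over a domain's vcpus switch from
the compile-time bound to the per-domain one. The helper name below is
hypothetical; struct domain's max_vcpus and vcpu[] fields and vcpu_pause()
are standard Xen.

    /* Hypothetical example of the conversion, not code from the patch. */
    static void pause_all_vcpus(struct domain *d)
    {
        int i;

        /* was: for (i = 0; i < MAX_VCPUS; i++) */
        for (i = 0; i < d->max_vcpus; i++)
            if (d->vcpu[i] != NULL)
                vcpu_pause(d->vcpu[i]);
    }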
#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H

/*
 * This file contains the functions and defines necessary to allocate
 * page tables.
 *
 * This hopefully works with any (fixed) ia-64 page-size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
 */

#include <linux/config.h>

#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/threads.h>

#include <asm/mmu_context.h>
#ifndef XEN
DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
DECLARE_PER_CPU(long, __pgtable_quicklist_size);
#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)

static inline long pgtable_quicklist_total_size(void)
{
	long ql_size = 0;
	int cpuid;

	for_each_online_cpu(cpuid) {
		ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
	}
	return ql_size;
}
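
/*
 * Added note: the summation above takes no locks, so concurrent
 * alloc/free activity on other CPUs can skew the total slightly.
 * An approximate figure is sufficient for its callers.
 */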
static inline void *pgtable_quicklist_alloc(void)
{
	unsigned long *ret = NULL;

	preempt_disable();

	ret = pgtable_quicklist;
	if (likely(ret != NULL)) {
		/* Unlink the head page; free pages are chained through
		 * their first word, which is re-zeroed before handout. */
		pgtable_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		--pgtable_quicklist_size;
		preempt_enable();
	} else {
		/* Quicklist empty: fall back to the page allocator. */
		preempt_enable();
		ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	}

	return ret;
}
static inline void pgtable_quicklist_free(void *pgtable_entry)
{
#ifdef CONFIG_NUMA
	unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));

	/* Keep the per-CPU list node-local: return off-node pages
	 * straight to the page allocator instead of caching them. */
	if (unlikely(nid != numa_node_id())) {
		free_page((unsigned long)pgtable_entry);
		return;
	}
#endif

	preempt_disable();
	*(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
	pgtable_quicklist = (unsigned long *)pgtable_entry;
	++pgtable_quicklist_size;
	preempt_enable();
}
#endif
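
/*
 * Illustrative usage sketch (added commentary, not in the original
 * header): every page-table level on ia64 is a single zeroed page, so
 * one quicklist serves pgd, pmd and pte allocations alike:
 *
 *	pte_t *pte = pgtable_quicklist_alloc();	-- zeroed page
 *	... populate and use the table ...
 *	pgtable_quicklist_free(pte);		-- back on this CPU's list
 */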
#ifdef XEN
#include <asm/pgtable.h>
#ifdef __PAGETABLE_PUD_FOLDED
/* With the pud level folded into the pgd there is no separate pgd
 * entry to install, so the exchange trivially "succeeds". */
# define pgd_cmpxchg_rel(mm, pgd, old_pud, new_pud)	({ (void)old_pud; 1; })
#else
# error "implement pgd_cmpxchg_rel()!"
#endif
#endif
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return pgtable_quicklist_alloc();
}

#ifndef XEN
static inline void pgd_free(pgd_t * pgd)
{
	pgtable_quicklist_free(pgd);
}
#else
/* Xen's callers hold volatile-qualified page-table pointers (the
 * tables may be updated concurrently), hence the differing signature
 * and the cast to discard the qualifier. */
static inline void pgd_free(volatile pgd_t * pgd)
{
	pgtable_quicklist_free((void *)pgd);
}
#endif
static inline void
pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
{
	pud_val(*pud_entry) = __pa(pmd);
}
#ifdef XEN
static inline int
pud_cmpxchg_rel(struct mm_struct *mm, volatile pud_t * pud_entry,
		pmd_t * old_pmd, volatile pmd_t * new_pmd)
{
#ifdef CONFIG_SMP
	unsigned long r;
	r = cmpxchg_rel(&pud_val(*pud_entry), __pa(old_pmd), __pa(new_pmd));
	return (r == __pa(old_pmd));
#else
	/* No racing CPUs: a plain compare-and-store is enough. */
	if (pud_val(*pud_entry) == __pa(old_pmd)) {
		pud_val(*pud_entry) = __pa(new_pmd);
		return 1;
	}
	return 0;
#endif
}
#endif
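
/*
 * Illustrative caller pattern (added commentary, not in the original
 * header): two CPUs may race to populate the same empty pud slot.
 * Each allocates a candidate pmd page and tries to install it; only
 * one cmpxchg can observe the old value, so the loser just frees its
 * page:
 *
 *	pmd_t *new_pmd = pmd_alloc_one(mm, addr);
 *	if (!pud_cmpxchg_rel(mm, pud_entry, observed_pmd, new_pmd))
 *		pmd_free(new_pmd);	-- lost the race; winner's pmd stands
 *
 * Here observed_pmd is whatever the caller read from the slot before
 * allocating.
 */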
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

#ifndef XEN
static inline void pmd_free(pmd_t * pmd)
{
	pgtable_quicklist_free(pmd);
}
#else
static inline void pmd_free(volatile pmd_t * pmd)
{
	pgtable_quicklist_free((void *)pmd);
}
#endif

#define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)
#ifndef XEN
static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
{
	pmd_val(*pmd_entry) = page_to_maddr(pte);
}
#endif

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}
#ifdef XEN
static inline int
pmd_cmpxchg_kernel_rel(struct mm_struct *mm, volatile pmd_t * pmd_entry,
		       pte_t * old_pte, pte_t * new_pte)
{
#ifdef CONFIG_SMP
	unsigned long r;
	r = cmpxchg_rel(&pmd_val(*pmd_entry), __pa(old_pte), __pa(new_pte));
	return (r == __pa(old_pte));
#else
	if (pmd_val(*pmd_entry) == __pa(old_pte)) {
		pmd_val(*pmd_entry) = __pa(new_pte);
		return 1;
	}
	return 0;
#endif
}
#endif
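
/*
 * Added note: the _rel suffix matters here.  cmpxchg_rel has release
 * semantics on ia64, so every store that initialized the new table
 * page is ordered before the store that publishes the link to it; a
 * concurrent walker can never see a pointer to a half-initialized
 * table.
 */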
#ifndef XEN
static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long addr)
{
	return virt_to_page(pgtable_quicklist_alloc());
}
#endif

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long addr)
{
	return pgtable_quicklist_alloc();
}
#ifndef XEN
static inline void pte_free(struct page *pte)
{
	pgtable_quicklist_free(page_address(pte));
}

static inline void pte_free_kernel(pte_t * pte)
{
	pgtable_quicklist_free(pte);
}
#else
static inline void pte_free_kernel(volatile pte_t * pte)
{
	pgtable_quicklist_free((void *)pte);
}
#endif

#ifndef XEN
#define __pte_free_tlb(tlb, pte)	pte_free(pte)
#endif

extern void check_pgt_cache(void);

#endif /* _ASM_IA64_PGALLOC_H */