ia64/xen-unstable

linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/pgalloc.h @ 11221:7c9d7fc3dce5

[HVM] Fix SMBIOS entry point copy destination.
Spotted by Xiaowei Yang <xiaowei.yang@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>

author   kaf24@localhost.localdomain
date     Sat Aug 19 12:06:36 2006 +0100
parents  57f21f67b532
children 7c5c72a0283f 4fad820a2233
#ifndef _X86_64_PGALLOC_H
#define _X86_64_PGALLOC_H

#include <asm/fixmap.h>
#include <asm/pda.h>
#include <linux/threads.h>
#include <linux/mm.h>
#include <asm/io.h>	/* for phys_to_virt and page_to_pseudophys */

#include <xen/features.h>
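
/*
 * Helpers that flip the guest mapping of a page-table page between
 * read-only and writable via the hypervisor. Each takes a XENFEAT_*
 * flag and becomes a no-op when the hypervisor advertises that
 * feature (i.e. when it emulates page-table writes itself).
 */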
void make_page_readonly(void *va, unsigned int feature);
void make_page_writable(void *va, unsigned int feature);
void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
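
/*
 * 64-bit PV guests run kernel and user space on separate page-table
 * roots; pgd_alloc() below allocates the two PGD pages contiguously,
 * so the user PGD sits one page (PTRS_PER_PGD entries) past the
 * kernel one.
 */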
#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
	set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
}
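
/*
 * Once an mm's page tables have been pinned (handed to Xen for
 * validation), every page in the tree must be mapped read-only in
 * the guest. New page-table pages are therefore write-protected
 * with update_va_mapping before being hooked in; an unpinned tree
 * can be updated with plain writes.
 */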
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
	if (unlikely((mm)->context.pinned)) {
		BUG_ON(HYPERVISOR_update_va_mapping(
			(unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
			pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
		set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
	} else {
		*(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT));
	}
}
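
/* Same discipline as pmd_populate(): write-protect the new PMD page
 * before installing it in a pinned tree. */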
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	if (unlikely((mm)->context.pinned)) {
		BUG_ON(HYPERVISOR_update_va_mapping(
			(unsigned long)pmd,
			pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT,
				PAGE_KERNEL_RO), 0));
		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
	} else {
		*(pud) = __pud(_PAGE_TABLE | __pa(pmd));
	}
}

/*
 * We need to use the batch mode here, but pgd_populate() won't
 * be called frequently.
 */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	if (unlikely((mm)->context.pinned)) {
		BUG_ON(HYPERVISOR_update_va_mapping(
			(unsigned long)pud,
			pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT,
				PAGE_KERNEL_RO), 0));
		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
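		/* Mirror the entry into the user PGD so both page-table
		 * roots reference the same PUD. */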
		set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
	} else {
		*(pgd) = __pgd(_PAGE_TABLE | __pa(pud));
		*(__user_pgd(pgd)) = *(pgd);
	}
}
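
/*
 * A page freed out of a pinned tree is still mapped read-only (its
 * PTE is not writable); give it back a writable mapping before
 * returning it to the page allocator.
 */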
static inline void pmd_free(pmd_t *pmd)
{
	pte_t *ptep = virt_to_ptep(pmd);

	if (!pte_write(*ptep)) {
		BUG_ON(HYPERVISOR_update_va_mapping(
			(unsigned long)pmd,
			pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT, PAGE_KERNEL),
			0));
	}
	free_page((unsigned long)pmd);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pmd_t *pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
	return pmd;
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud = (pud_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
	return pud;
}

static inline void pud_free(pud_t *pud)
{
	pte_t *ptep = virt_to_ptep(pud);

	if (!pte_write(*ptep)) {
		BUG_ON(HYPERVISOR_update_va_mapping(
			(unsigned long)pud,
			pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, PAGE_KERNEL),
			0));
	}
	free_page((unsigned long)pud);
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	/*
	 * We allocate two contiguous pages for kernel and user.
	 */
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);

	if (!pgd)
		return NULL;
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));

	memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
	/*
	 * Set level3_user_pgt for vsyscall area
	 */
	set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START),
		mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
	return pgd;
}
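
/*
 * Both PGD pages may still be pinned and read-only; unpin each one
 * and restore a writable mapping before the pair is freed.
 */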
static inline void pgd_free(pgd_t *pgd)
{
	pte_t *ptep = virt_to_ptep(pgd);

	if (!pte_write(*ptep)) {
		xen_pgd_unpin(__pa(pgd));
		BUG_ON(HYPERVISOR_update_va_mapping(
			(unsigned long)pgd,
			pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL),
			0));
	}

	ptep = virt_to_ptep(__user_pgd(pgd));

	if (!pte_write(*ptep)) {
		xen_pgd_unpin(__pa(__user_pgd(pgd)));
		BUG_ON(HYPERVISOR_update_va_mapping(
			(unsigned long)__user_pgd(pgd),
			pfn_pte(virt_to_phys(__user_pgd(pgd))>>PAGE_SHIFT,
				PAGE_KERNEL),
			0));
	}

	free_pages((unsigned long)pgd, 1);
}
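
/*
 * Kernel PTE pages are write-protected at allocation time (a no-op
 * under XENFEAT_writable_page_tables): pmd_populate_kernel() above
 * installs them without any further re-protection.
 */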
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
	if (pte)
		make_page_readonly(pte, XENFEAT_writable_page_tables);

	return pte;
}
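
/* User PTE pages stay writable here; pmd_populate() write-protects
 * them later if the mm is pinned. */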
static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
	return pte;
}

/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */

static inline void pte_free_kernel(pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	make_page_writable(pte, XENFEAT_writable_page_tables);
	free_page((unsigned long)pte);
}

extern void pte_free(struct page *pte);

//#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
//#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
//#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
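
/*
 * The generic tlb_remove_page() variants above are disabled:
 * page-table pages may still be mapped read-only and must go through
 * the Xen-aware free routines, which restore writable mappings first.
 */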
#define __pte_free_tlb(tlb,x) pte_free((x))
#define __pmd_free_tlb(tlb,x) pmd_free((x))
#define __pud_free_tlb(tlb,x) pud_free((x))

#endif /* _X86_64_PGALLOC_H */