ia64/xen-unstable

view linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/page.h @ 6415:b88c5350de57

This patch fixes Bugzilla #169. (It should fix #187 too -- Keir).

The root cause of bug 169 is that machine_to_phys_mapping, which starts
at 0xffff800000000000, is mapped using 2MB pages. When the system has no
more than 2GB of RAM, only one 2MB page is allocated and only one
corresponding PDE entry is created, so calling mfn_to_pfn with an
mfn > 0x80000 runs off the end of that 2MB page and causes an unhandled
kernel paging request. Such mfns come from PCI device I/O memory, in
this case from an AGP display card when the X server starts. Jun
suggested using something like get_user() when accessing
machine_to_phys_mapping.
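
For context: a get_user()-style access pairs the potentially faulting
load with an entry in the kernel's __ex_table section, so the page-fault
handler resumes at a fixup label instead of oopsing. A minimal sketch of
the pattern follows; safe_load_u32 is an illustrative name only, and the
actual change is the mfn_to_pfn() implementation in the source below.

/* Illustrative sketch, not part of the patch. */
static inline unsigned int safe_load_u32(const unsigned int *addr)
{
	unsigned int val;
	asm (
		"1:	movl %1,%0\n"	/* the load that may fault */
		"2:\n"
		".section __ex_table,\"a\"\n"
		"	.align 8\n"
		"	.quad 1b,2b\n"	/* fault at 1b => resume at 2b */
		".previous"
		: "=r" (val) : "m" (*addr) );
	return val;	/* garbage if the load faulted, but no oops */
}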

Signed-off-by: Xin Li <xin.b.li@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Aug 25 16:21:19 2005 +0000 (2005-08-25)
parents 522bc50588ed
children b54144915ae6 0610add7c3fe b4b3f6be5226
line source
#ifndef _X86_64_PAGE_H
#define _X86_64_PAGE_H

#include <linux/config.h>
/* #include <linux/string.h> */
#ifndef __ASSEMBLY__
#include <linux/types.h>
#endif
#include <asm-xen/xen-public/xen.h>
#include <asm-xen/foreign_page.h>

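/*
 * Pages owned by another domain (PageForeign) must be released through
 * their registered destructor rather than the normal free path; the
 * nonzero return tells the caller the page has been handled here.
 */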
#define arch_free_page(_page,_order)			\
({	int foreign = PageForeign(_page);		\
	if (foreign)					\
		(PageForeignDestructor(_page))(_page);	\
	foreign;					\
})
#define HAVE_ARCH_FREE_PAGE

#ifdef CONFIG_XEN_SCRUB_PAGES
#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
#else
#define scrub_pages(_p,_n) ((void)0)
#endif

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#ifdef __ASSEMBLY__
#define PAGE_SIZE	(0x1 << PAGE_SHIFT)
#else
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#endif
#define PAGE_MASK	(~(PAGE_SIZE-1))
#define PHYSICAL_PAGE_MASK	(~(PAGE_SIZE-1) & (__PHYSICAL_MASK << PAGE_SHIFT))

#define THREAD_ORDER 1
#ifdef __ASSEMBLY__
#define THREAD_SIZE  (1 << (PAGE_SHIFT + THREAD_ORDER))
#else
#define THREAD_SIZE  (1UL << (PAGE_SHIFT + THREAD_ORDER))
#endif
#define CURRENT_MASK (~(THREAD_SIZE-1))

#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)

#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

void clear_page(void *);
void copy_page(void *, void *);

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY	(~0U)
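/* Bit 31 of a phys_to_machine_mapping entry marks the frame as foreign
   (owned by another domain); pfn_to_mfn() masks it back off below. */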
#define FOREIGN_FRAME(m)	((m) | 0x80000000U)
extern u32 *phys_to_machine_mapping;
#define pfn_to_mfn(pfn) \
	((unsigned long)phys_to_machine_mapping[(unsigned int)(pfn)] & 0x7FFFFFFFUL)
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	unsigned int pfn;

	/*
	 * The array access can fail (e.g., device space beyond end of RAM).
	 * In such cases it doesn't matter what we return (we return garbage),
	 * but we must handle the fault without crashing!
	 */
	asm (
		"1:	movl %1,%k0\n"
		"2:\n"
		".section __ex_table,\"a\"\n"
		"	.align 8\n"
		"	.quad 1b,2b\n"
		".previous"
		: "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]) );

	return (unsigned long)pfn;
}

/* Definitions for machine and pseudophysical addresses. */
typedef unsigned long paddr_t;
typedef unsigned long maddr_t;

static inline maddr_t phys_to_machine(paddr_t phys)
{
	maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
	machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
	return machine;
}

static inline paddr_t machine_to_phys(maddr_t machine)
{
	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
	phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
	return phys;
}

/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;
#define PTE_MASK	PHYSICAL_PAGE_MASK

typedef struct { unsigned long pgprot; } pgprot_t;

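/* pte_val() translates present entries (bit 0 set) from machine back to
   pseudophysical; the pmd/pud/pgd accessors below translate any nonzero
   entry. Non-present entries are returned unchanged. */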
#define pte_val(x)	(((x).pte & 1) ? machine_to_phys((x).pte) : \
			 (x).pte)
#define pte_val_ma(x)	((x).pte)

static inline unsigned long pmd_val(pmd_t x)
{
	unsigned long ret = x.pmd;
	if (ret) ret = machine_to_phys(ret);
	return ret;
}

static inline unsigned long pud_val(pud_t x)
{
	unsigned long ret = x.pud;
	if (ret) ret = machine_to_phys(ret);
	return ret;
}

static inline unsigned long pgd_val(pgd_t x)
{
	unsigned long ret = x.pgd;
	if (ret) ret = machine_to_phys(ret);
	return ret;
}

#define pgprot_val(x)	((x).pgprot)

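/* __pte_ma() takes an already-machine pte verbatim; the constructors
   below convert pseudophysical to machine first when the present bit
   (bit 0) is set. */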
#define __pte_ma(x)	((pte_t) { (x) } )

static inline pte_t __pte(unsigned long x)
{
	if (x & 1) x = phys_to_machine(x);
	return ((pte_t) { (x) });
}

static inline pmd_t __pmd(unsigned long x)
{
	if ((x & 1)) x = phys_to_machine(x);
	return ((pmd_t) { (x) });
}

static inline pud_t __pud(unsigned long x)
{
	if ((x & 1)) x = phys_to_machine(x);
	return ((pud_t) { (x) });
}

static inline pgd_t __pgd(unsigned long x)
{
	if ((x & 1)) x = phys_to_machine(x);
	return ((pgd_t) { (x) });
}

#define __pgprot(x)	((pgprot_t) { (x) } )

#define __START_KERNEL		0xffffffff80100000UL
#define __START_KERNEL_map	0xffffffff80000000UL
#define __PAGE_OFFSET		0xffff880000000000UL

#else
#define __START_KERNEL		0xffffffff80100000
#define __START_KERNEL_map	0xffffffff80000000
#define __PAGE_OFFSET		0xffff880000000000
#endif /* !__ASSEMBLY__ */

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)

/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT	46
#define __PHYSICAL_MASK		((1UL << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK_SHIFT	48
#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)

#define KERNEL_TEXT_SIZE  (40UL*1024*1024)
#define KERNEL_TEXT_START 0xffffffff80000000UL

#ifndef __ASSEMBLY__

#include <asm/bug.h>

/* Pure 2^n version of get_order */
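/* e.g. get_order(PAGE_SIZE) == 0, get_order(PAGE_SIZE + 1) == 1 */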
extern __inline__ int get_order(unsigned long size)
{
	int order;

	size = (size-1) >> (PAGE_SHIFT-1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

#endif /* __ASSEMBLY__ */

#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)

/* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol.
   Otherwise you risk miscompilation. */
#define __pa(x)			(((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
/* __pa_symbol should be used for C visible symbols.
   This seems to be the official gcc blessed way to do such arithmetic. */
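/* The empty asm forces x through a register, hiding the symbol's
   provenance from gcc so it cannot (mis)fold the address arithmetic. */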
#define __pa_symbol(x)		\
	({unsigned long v;	\
	  asm("" : "=r" (v) : "0" (x)); \
	  __pa(v); })

#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
#ifndef CONFIG_DISCONTIGMEM
#define pfn_to_page(pfn)	(mem_map + (pfn))
#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
#define pfn_valid(pfn)		((pfn) < max_mapnr)
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(__pa(v)))
#define machine_to_virt(m)	(__va(machine_to_phys(m)))
#define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))

#define VM_DATA_DEFAULT_FLAGS \
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define __HAVE_ARCH_GATE_AREA 1

#endif /* __KERNEL__ */

#endif /* _X86_64_PAGE_H */