ia64/xen-unstable
linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/page.h @ 8534:da7873110bbb

Tiny bootstrap cleanup.

Signed-off-by: Keir Fraser <keir@xensource.com>

author  kaf24@firebug.cl.cam.ac.uk
date    Mon Jan 09 19:46:46 2006 +0100
parents a08aef9f1c8e
#ifndef _X86_64_PAGE_H
#define _X86_64_PAGE_H

#include <linux/config.h>
/* #include <linux/string.h> */
#ifndef __ASSEMBLY__
#include <linux/types.h>
#endif
#include <asm-xen/xen-public/xen.h>
#include <asm-xen/foreign_page.h>
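
/*
 * Pages mapped in from another domain are tagged "foreign"; they must
 * not be returned to the buddy allocator directly.  arch_free_page()
 * below diverts them to their registered destructor instead, and its
 * non-zero result tells the generic free path to skip the page.
 */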
#define arch_free_page(_page,_order)			\
({	int foreign = PageForeign(_page);		\
	if (foreign)					\
		(PageForeignDestructor(_page))(_page);	\
	foreign;					\
})
#define HAVE_ARCH_FREE_PAGE
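
/*
 * Pages handed back to the hypervisor (e.g. by the balloon driver) may
 * be given to another domain, so CONFIG_XEN_SCRUB_PAGES zeroes them
 * first to avoid leaking their old contents.
 */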
#ifdef CONFIG_XEN_SCRUB_PAGES
#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
#else
#define scrub_pages(_p,_n) ((void)0)
#endif

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#ifdef __ASSEMBLY__
#define PAGE_SIZE	(0x1 << PAGE_SHIFT)
#else
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#endif
#define PAGE_MASK	(~(PAGE_SIZE-1))
#define PHYSICAL_PAGE_MASK	(~(PAGE_SIZE-1) & (__PHYSICAL_MASK << PAGE_SHIFT))

#define THREAD_ORDER	1
#ifdef __ASSEMBLY__
#define THREAD_SIZE	(1 << (PAGE_SHIFT + THREAD_ORDER))
#else
#define THREAD_SIZE	(1UL << (PAGE_SHIFT + THREAD_ORDER))
#endif
#define CURRENT_MASK	(~(THREAD_SIZE-1))

#define LARGE_PAGE_MASK	(~(LARGE_PAGE_SIZE-1))
#define LARGE_PAGE_SIZE	(1UL << PMD_SHIFT)

#define HPAGE_SHIFT	PMD_SHIFT
#define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
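
/*
 * For reference: with PAGE_SHIFT == 12, base pages are 4 KiB; assuming
 * the usual x86_64 PMD_SHIFT of 21 (defined in pgtable.h), large/huge
 * pages are 2 MiB, i.e. HUGETLB_PAGE_ORDER == 9.
 */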

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

void clear_page(void *);
void copy_page(void *, void *);

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY	(~0UL)
#define FOREIGN_FRAME(m)	((m) | (1UL<<63))
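
/*
 * phys_to_machine_mapping is the p2m table: one machine frame number
 * (MFN) per pseudo-physical frame.  Bit 63 tags entries whose frame is
 * owned by another domain, which is why pfn_to_mfn() masks it off.
 */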
extern unsigned long *phys_to_machine_mapping;
#define pfn_to_mfn(pfn) \
	(phys_to_machine_mapping[(unsigned int)(pfn)] & ~(1UL << 63))
#define phys_to_machine_mapping_valid(pfn) \
	(phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY)

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	unsigned long pfn;

	/*
	 * The array access can fail (e.g., device space beyond end of RAM).
	 * In such cases it doesn't matter what we return (we return garbage),
	 * but we must handle the fault without crashing!
	 */
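	/*
	 * The __ex_table entry below pairs the faulting instruction (1b)
	 * with a fixup address (2b): if the load faults, the page-fault
	 * handler resumes at 2b instead of oopsing, leaving whatever was
	 * in the output register as the "garbage" return value.
	 */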
	asm (
		"1: movq %1,%0\n"
		"2:\n"
		".section __ex_table,\"a\"\n"
		" .align 8\n"
		" .quad 1b,2b\n"
		".previous"
		: "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]) );

	return pfn;
}

static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	phys_to_machine_mapping[pfn] = mfn;
}

/* Definitions for machine and pseudophysical addresses. */
typedef unsigned long paddr_t;
typedef unsigned long maddr_t;
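
/*
 * paddr_t is a guest "pseudo-physical" address (contiguous from the
 * guest's point of view); maddr_t is a real machine address.  The
 * conversions below translate only the frame number and carry the
 * offset within the page through unchanged.
 */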
static inline maddr_t phys_to_machine(paddr_t phys)
{
	maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
	machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
	return machine;
}

static inline paddr_t machine_to_phys(maddr_t machine)
{
	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
	phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
	return phys;
}

/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;
#define PTE_MASK	PHYSICAL_PAGE_MASK

typedef struct { unsigned long pgprot; } pgprot_t;
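
/*
 * Bit 0 of a PTE is _PAGE_PRESENT: present entries hold machine
 * addresses, so pte_val() translates them back to pseudo-physical for
 * generic kernel code, while pte_val_ma() returns the raw machine
 * value.  The pmd/pud/pgd accessors below convert any non-zero entry.
 */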
#define pte_val(x)	(((x).pte & 1) ? machine_to_phys((x).pte) : \
			 (x).pte)
#define pte_val_ma(x)	((x).pte)

static inline unsigned long pmd_val(pmd_t x)
{
	unsigned long ret = x.pmd;
	if (ret) ret = machine_to_phys(ret);
	return ret;
}

static inline unsigned long pud_val(pud_t x)
{
	unsigned long ret = x.pud;
	if (ret) ret = machine_to_phys(ret);
	return ret;
}

static inline unsigned long pgd_val(pgd_t x)
{
	unsigned long ret = x.pgd;
	if (ret) ret = machine_to_phys(ret);
	return ret;
}

#define pgprot_val(x)	((x).pgprot)

#define __pte_ma(x)	((pte_t) { (x) } )
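
/*
 * The constructors below do the reverse of the *_val() accessors:
 * present (bit 0 set) values are converted from pseudo-physical to
 * machine before being installed.  __pte_ma() above builds a PTE from
 * a value that is already a machine address.
 */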
static inline pte_t __pte(unsigned long x)
{
	if (x & 1) x = phys_to_machine(x);
	return ((pte_t) { (x) });
}

static inline pmd_t __pmd(unsigned long x)
{
	if ((x & 1)) x = phys_to_machine(x);
	return ((pmd_t) { (x) });
}

static inline pud_t __pud(unsigned long x)
{
	if ((x & 1)) x = phys_to_machine(x);
	return ((pud_t) { (x) });
}

static inline pgd_t __pgd(unsigned long x)
{
	if ((x & 1)) x = phys_to_machine(x);
	return ((pgd_t) { (x) });
}

#define __pgprot(x)	((pgprot_t) { (x) } )

#define __START_KERNEL		0xffffffff80100000UL
#define __START_KERNEL_map	0xffffffff80000000UL
#define __PAGE_OFFSET		0xffff880000000000UL

#else
#define __START_KERNEL		0xffffffff80100000
#define __START_KERNEL_map	0xffffffff80000000
#define __PAGE_OFFSET		0xffff880000000000
#endif /* !__ASSEMBLY__ */

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)

/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT	46
#define __PHYSICAL_MASK		((1UL << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK_SHIFT	48
#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)

#define KERNEL_TEXT_SIZE	(40UL*1024*1024)
#define KERNEL_TEXT_START	0xffffffff80000000UL

#ifndef __ASSEMBLY__

#include <asm/bug.h>

/* Pure 2^n version of get_order */
extern __inline__ int get_order(unsigned long size)
{
	int order;

	size = (size-1) >> (PAGE_SHIFT-1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
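
/*
 * For example: get_order(4096) == 0, get_order(4097) == 1 (two pages),
 * and get_order(8192) == 1.
 */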

#endif /* __ASSEMBLY__ */

#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)

/* Note: __pa(&symbol_visible_to_c) should always be replaced with
   __pa_symbol.  Otherwise you risk miscompilation. */
#define __pa(x)			(((unsigned long)(x) >= __START_KERNEL_map) ? \
				 (unsigned long)(x) - (unsigned long)__START_KERNEL_map : \
				 (unsigned long)(x) - PAGE_OFFSET)
/* __pa_symbol should be used for C visible symbols.
   This seems to be the official gcc blessed way to do such arithmetic. */
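/* The empty asm copies x through a register, hiding its origin as a
   symbol address from gcc so the subtraction in __pa() cannot be
   miscompiled on the assumption that it stays within one object. */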
#define __pa_symbol(x)		\
	({unsigned long v;	\
	  asm("" : "=r" (v) : "0" (x)); \
	  __pa(v); })

#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
#ifndef CONFIG_DISCONTIGMEM
#define pfn_to_page(pfn)	(mem_map + (pfn))
#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
#define pfn_valid(pfn)		((pfn) < max_mapnr)
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(__pa(v)))
#define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
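
/*
 * A minimal usage sketch (hypothetical buffer): code that hands memory
 * to the hypervisor or to a grant table needs machine frames, e.g.
 *
 *	unsigned long mfn = virt_to_mfn(some_kernel_buffer);
 *
 * where some_kernel_buffer is any directly-mapped kernel address.
 */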

#define VM_DATA_DEFAULT_FLAGS \
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define __HAVE_ARCH_GATE_AREA 1

#endif /* __KERNEL__ */

#endif /* _X86_64_PAGE_H */