ia64/xen-unstable

view linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/page.h @ 5066:c4353a81ae5b

bitkeeper revision 1.1159.258.150 (428e6fdeb6W1XbDj2YkcF53xkdUozA)

[PATCH] Re: PAE

> We're taking quite a chunk out of lowmem, though....

Uhm, well, no, we don't. We should though, fix is below.

Current code in unstable takes the address space away from the
vmalloc area, not the lowmem area. vmalloc space is 128 MB by
default, so the non-PAE hypervisor hole fits in and it works
nevertheless. The larger PAE mode hypervisor hole doesn't fit
in, so it breaks when you use enough memory (somewhere around
800-900 MB). I think that is the bug Scott Parish ran into.

Gerd
author kraxel@bytesex.org[kaf24]
date Fri May 20 23:16:46 2005 +0000 (2005-05-20)
parents a71203a12503
children 6640eb3cb41d
line source
1 #ifndef _I386_PAGE_H
2 #define _I386_PAGE_H
/* PAGE_SHIFT of 12 gives 4 KB base pages; PAGE_MASK clears the
 * in-page offset bits of an address. */
4 /* PAGE_SHIFT determines the page size */
5 #define PAGE_SHIFT 12
6 #define PAGE_SIZE (1UL << PAGE_SHIFT)
7 #define PAGE_MASK (~(PAGE_SIZE-1))
/* Large ("superpage") size is 2^PMD_SHIFT bytes.  PMD_SHIFT is not
 * defined in this header -- NOTE(review): comes from the pgtable
 * headers; 22 (4 MB) non-PAE vs 21 (2 MB) PAE, confirm there. */
9 #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
10 #define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
12 #ifdef __KERNEL__
13 #ifndef __ASSEMBLY__
15 #include <linux/config.h>
16 #include <linux/string.h>
17 #include <linux/types.h>
18 #include <asm-xen/xen-public/xen.h>
19 #include <asm-xen/foreign_page.h>
/* Per-arch hook run when a page is freed.  If the page is marked
 * foreign (PageForeign, from foreign_page.h -- presumably a page
 * belonging to another Xen domain, verify there), invoke its
 * registered destructor instead of letting the allocator reclaim it.
 * The statement-expression yields non-zero when the page was foreign,
 * so the generic free path can skip its normal handling. */
21 #define arch_free_page(_page,_order) \
22 ({ int foreign = PageForeign(_page); \
23 if (foreign) \
24 (PageForeignDestructor(_page))(_page); \
25 foreign; \
26 })
27 #define HAVE_ARCH_FREE_PAGE
/* scrub_pages(p, n): zero n pages starting at virtual address p.
 * Compiles to a no-op unless CONFIG_XEN_SCRUB_PAGES is enabled.
 * NOTE(review): presumably used before handing pages back to the
 * hypervisor so old contents cannot leak -- confirm at call sites. */
29 #ifdef CONFIG_XEN_SCRUB_PAGES
30 #define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
31 #else
32 #define scrub_pages(_p,_n) ((void)0)
33 #endif
/* Page clear/copy primitives: use the MMX-accelerated versions from
 * <asm/mmx.h> on 3DNow-capable CPUs, plain memset/memcpy otherwise. */
35 #ifdef CONFIG_X86_USE_3DNOW
37 #include <asm/mmx.h>
39 #define clear_page(page) mmx_clear_page((void *)(page))
40 #define copy_page(to,from) mmx_copy_page(to,from)
42 #else
/* Non-MMX path also advertises an arch-provided zeroed user highpage
 * allocator (the allocator zeroes via __GFP_ZERO, so no explicit
 * clear_user_highpage is needed afterwards). */
44 #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
45 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
47 /*
48 * On older X86 processors it's not a win to use MMX here it seems.
49 * Maybe the K6-III ?
50 */
52 #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
53 #define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
55 #endif
/* User-page variants ignore the vaddr/pg hints on i386. */
57 #define clear_user_page(page, vaddr, pg) clear_page(page)
58 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
60 /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
/* pfn_to_mfn: translate a (pseudo-)physical frame number to a machine
 * frame number via the phys_to_machine_mapping table (filled in
 * elsewhere).  mfn_to_pfn uses machine_to_phys_mapping, which is not
 * declared in this header -- NOTE(review): presumably provided by the
 * Xen public headers, confirm. */
61 extern unsigned int *phys_to_machine_mapping;
62 #define pfn_to_mfn(_pfn) ((unsigned long)(phys_to_machine_mapping[(_pfn)]))
63 #define mfn_to_pfn(_mfn) ((unsigned long)(machine_to_phys_mapping[(_mfn)]))
/* Translate a full (pseudo-)physical address to a machine address:
 * translate the frame number, then re-attach the in-page offset. */
64 static inline unsigned long phys_to_machine(unsigned long phys)
65 {
66 unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
67 machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
68 return machine;
69 }
/* Inverse of phys_to_machine: machine address -> physical address,
 * again preserving the in-page offset bits. */
70 static inline unsigned long machine_to_phys(unsigned long machine)
71 {
72 unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
73 phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
74 return phys;
75 }
77 /*
78 * These are used to make use of C type-checking..
79 */
80 extern int nx_enabled;
/* PAE: 64-bit page-table entries.  pte_t is split into low/high
 * halves; pte_val reassembles the full 64-bit value.  2 MB huge pages
 * (HPAGE_SHIFT 21). */
81 #ifdef CONFIG_X86_PAE
82 extern unsigned long long __supported_pte_mask;
83 typedef struct { unsigned long pte_low, pte_high; } pte_t;
84 typedef struct { unsigned long long pmd; } pmd_t;
85 typedef struct { unsigned long long pgd; } pgd_t;
86 typedef struct { unsigned long long pgprot; } pgprot_t;
87 #define pmd_val(x) ((x).pmd)
88 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
89 #define __pmd(x) ((pmd_t) { (x) } )
90 #define HPAGE_SHIFT 21
/* Non-PAE: 32-bit entries and 4 MB huge pages (HPAGE_SHIFT 22).
 * pte_val translates machine to physical when bit 0 (the present bit)
 * is set, since present entries hold machine frame addresses under
 * Xen; pte_val_ma returns the raw machine-address value.
 * NOTE(review): no pte_val_ma is defined on the PAE side here. */
91 #else
92 typedef struct { unsigned long pte_low; } pte_t;
93 typedef struct { unsigned long pgd; } pgd_t;
94 typedef struct { unsigned long pgprot; } pgprot_t;
95 #define boot_pte_t pte_t /* or would you rather have a typedef */
96 #define pte_val(x) (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : \
97 (x).pte_low)
98 #define pte_val_ma(x) ((x).pte_low)
99 #define HPAGE_SHIFT 22
100 #endif
101 #define PTE_MASK PAGE_MASK
/* Derived hugetlb constants; order = log2(huge page / base page). */
103 #ifdef CONFIG_HUGETLB_PAGE
104 #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
105 #define HPAGE_MASK (~(HPAGE_SIZE - 1))
106 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
107 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
108 #endif
/* pgd_val: read a pgd entry as a physical value.  A zero entry is
 * returned as-is (not a valid machine address to translate); nonzero
 * entries are converted from machine to physical. */
111 static inline unsigned long pgd_val(pgd_t x)
112 {
113 unsigned long ret = x.pgd;
114 if (ret) ret = machine_to_phys(ret);
115 return ret;
116 }
117 #define pgprot_val(x) ((x).pgprot)
/* Constructors: __pte/__pgd convert physical to machine only when
 * bit 0 (the present bit) is set; non-present values are stored
 * untranslated.  __pte_ma builds an entry from a value that is
 * already a machine address, with no translation. */
119 #define __pte(x) ({ unsigned long _x = (x); \
120 (((_x)&1) ? ((pte_t) {phys_to_machine(_x)}) : ((pte_t) {(_x)})); })
121 #define __pte_ma(x) ((pte_t) { (x) } )
122 #define __pgd(x) ({ unsigned long _x = (x); \
123 (((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
124 #define __pgprot(x) ((pgprot_t) { (x) } )
126 #endif /* !__ASSEMBLY__ */
/* Round addr up to the next page boundary (addr already aligned is
 * returned unchanged). */
128 /* to align the pointer to the (next) page boundary */
129 #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
131 /*
132 * This handles the memory map.. We could make this a config
133 * option, but too many people screw it up, and too few need
134 * it.
135 *
136 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
137 * a virtual address space of one gigabyte, which limits the
138 * amount of physical memory you can use to about 950MB.
139 *
140 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
141 * and CONFIG_HIGHMEM64G options in the kernel configuration.
142 */
144 #ifndef __ASSEMBLY__
146 /*
147 * This much address space is reserved for vmalloc() and iomap()
148 * as well as fixmap mappings.
149 */
150 extern unsigned int __VMALLOC_RESERVE;
/* get_order(size): smallest n such that (PAGE_SIZE << n) >= size,
 * i.e. the allocation order for a request of `size` bytes.  The
 * initial shift by PAGE_SHIFT-1 plus the first in-loop shift together
 * divide by PAGE_SIZE; starting order at -1 compensates.
 * NOTE(review): size == 0 wraps (size-1 underflows unsigned long) and
 * yields a huge order -- callers must not pass 0. */
152 /* Pure 2^n version of get_order */
153 static __inline__ int get_order(unsigned long size)
154 {
155 int order;
157 size = (size-1) >> (PAGE_SHIFT-1);
158 order = -1;
159 do {
160 size >>= 1;
161 order++;
162 } while (size);
163 return order;
164 }
166 extern int sysctl_legacy_va_layout;
168 #endif /* __ASSEMBLY__ */
/* Kernel/user split at 3 GB.  The UL suffix is dropped for assembly,
 * which cannot parse C integer suffixes. */
170 #ifdef __ASSEMBLY__
171 #define __PAGE_OFFSET (0xC0000000)
172 #else
173 #define __PAGE_OFFSET (0xC0000000UL)
174 #endif
177 #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
178 #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
/* Maximum directly mappable memory: the lowmem window ends where the
 * hypervisor hole begins (HYPERVISOR_VIRT_START, from the Xen public
 * headers), minus the vmalloc reservation -- this is the fix the
 * changeset message above describes (carve the hole out of lowmem,
 * not out of the vmalloc area). */
179 #define MAXMEM (HYPERVISOR_VIRT_START-__PAGE_OFFSET-__VMALLOC_RESERVE)
/* Linear virtual <-> physical conversion for the direct mapping. */
180 #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
181 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
182 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
/* Flat mem_map model when DISCONTIGMEM is off: pfn indexes mem_map
 * directly and any pfn below max_mapnr is valid. */
183 #ifndef CONFIG_DISCONTIGMEM
184 #define pfn_to_page(pfn) (mem_map + (pfn))
185 #define page_to_pfn(page) ((unsigned long)((page) - mem_map))
186 #define pfn_valid(pfn) ((pfn) < max_mapnr)
187 #endif /* !CONFIG_DISCONTIGMEM */
188 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
190 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
/* Default VMA flags for data mappings; VM_EXEC only when the task's
 * personality requests read-implies-exec. */
192 #define VM_DATA_DEFAULT_FLAGS \
193 (VM_READ | VM_WRITE | \
194 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
195 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
/* Kernel-virtual <-> machine address, composed from the linear
 * __pa/__va mapping and the frame-number translation above. */
197 /* VIRT <-> MACHINE conversion */
198 #define virt_to_machine(_a) (phys_to_machine(__pa(_a)))
199 #define machine_to_virt(_m) (__va(machine_to_phys(_m)))
201 #endif /* __KERNEL__ */
203 #endif /* _I386_PAGE_H */