ia64/xen-unstable

view extras/mini-os/include/mm.h @ 10843:4f6d858ea570

[PCI] Per-device permissive flag (replaces global permissive flag).
Signed-off-by: Chris Bookholt <hap10@tycho.ncsc.mil>
author kfraser@localhost.localdomain
date Fri Jul 28 12:56:10 2006 +0100 (2006-07-28)
parents 4db818a7dc3f
children a3c6479c87ef
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4 -*-
2 *
3 * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
4 * Copyright (c) 2005, Keir A Fraser
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
25 #ifndef _MM_H_
26 #define _MM_H_
28 #if defined(__i386__)
29 #include <xen/arch-x86_32.h>
30 #elif defined(__x86_64__)
31 #include <xen/arch-x86_64.h>
32 #else
33 #error "Unsupported architecture"
34 #endif
36 #include <lib.h>
/* Symbolic identifiers for the pagetable levels; used when requesting that
 * a frame be treated as an L1/L2/L3 pagetable. */
38 #define L1_FRAME 1
39 #define L2_FRAME 2
40 #define L3_FRAME 3
/* All supported modes use 4KiB leaf pages: 12-bit in-page offset. */
42 #define L1_PAGETABLE_SHIFT 12
44 #if defined(__i386__)
/* 32-bit non-PAE: two-level paging, 32-bit physical addresses. */
46 #if !defined(CONFIG_X86_PAE)
48 #define L2_PAGETABLE_SHIFT 22
50 #define L1_PAGETABLE_ENTRIES 1024
51 #define L2_PAGETABLE_ENTRIES 1024
53 #define PADDR_BITS 32
54 #define PADDR_MASK (~0UL)
56 #define NOT_L1_FRAMES 1
/* printf format used to print a pgentry_t (matches its width below). */
57 #define PRIpte "08lx"
58 typedef unsigned long pgentry_t;
/* 32-bit PAE: three-level paging, 64-bit pagetable entries. */
60 #else /* defined(CONFIG_X86_PAE) */
62 #define L2_PAGETABLE_SHIFT 21
63 #define L3_PAGETABLE_SHIFT 30
65 #define L1_PAGETABLE_ENTRIES 512
66 #define L2_PAGETABLE_ENTRIES 512
67 #define L3_PAGETABLE_ENTRIES 4
69 #define PADDR_BITS 44
70 #define PADDR_MASK ((1ULL << PADDR_BITS)-1)
/* Mask selecting the offset covered by one L3 entry (i.e. within 1GiB). */
72 #define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1)
74 /*
75 * If starting from virtual address greater than 0xc0000000,
76 * this value will be 2 to account for final mid-level page
77 * directory which is always mapped in at this location.
78 */
79 #define NOT_L1_FRAMES 3
80 #define PRIpte "016llx"
81 typedef uint64_t pgentry_t;
83 #endif /* !defined(CONFIG_X86_PAE) */
/* 64-bit: four-level paging, 64-bit entries fit in unsigned long. */
85 #elif defined(__x86_64__)
87 #define L2_PAGETABLE_SHIFT 21
88 #define L3_PAGETABLE_SHIFT 30
89 #define L4_PAGETABLE_SHIFT 39
91 #define L1_PAGETABLE_ENTRIES 512
92 #define L2_PAGETABLE_ENTRIES 512
93 #define L3_PAGETABLE_ENTRIES 512
94 #define L4_PAGETABLE_ENTRIES 512
96 /* These are page-table limitations. Current CPUs support only 40-bit phys. */
97 #define PADDR_BITS 52
98 #define VADDR_BITS 48
99 #define PADDR_MASK ((1UL << PADDR_BITS)-1)
100 #define VADDR_MASK ((1UL << VADDR_BITS)-1)
/* Masks for the address range covered by one L3 / L4 entry respectively. */
102 #define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1)
103 #define L3_MASK ((1UL << L4_PAGETABLE_SHIFT) - 1)
105 #define NOT_L1_FRAMES 3
106 #define PRIpte "016lx"
107 typedef unsigned long pgentry_t;
109 #endif
/* Mask selecting the offset covered by one L2 entry (one large page). */
111 #define L1_MASK ((1UL << L2_PAGETABLE_SHIFT) - 1)
113 /* Given a virtual address, get an entry offset into a page table. */
114 #define l1_table_offset(_a) \
115 (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
116 #define l2_table_offset(_a) \
117 (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
/* L3 exists only with 3+ level paging (PAE or 64-bit). */
118 #if defined(__x86_64__) || defined(CONFIG_X86_PAE)
119 #define l3_table_offset(_a) \
120 (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
121 #endif
/* L4 exists only with 4-level (64-bit) paging. */
122 #if defined(__x86_64__)
123 #define l4_table_offset(_a) \
124 (((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
125 #endif
/* x86 pagetable-entry flag bits (low bits of a pgentry_t). */
127 #define _PAGE_PRESENT 0x001UL
128 #define _PAGE_RW 0x002UL
129 #define _PAGE_USER 0x004UL
130 #define _PAGE_PWT 0x008UL
131 #define _PAGE_PCD 0x010UL
132 #define _PAGE_ACCESSED 0x020UL
133 #define _PAGE_DIRTY 0x040UL
/* NOTE(review): _PAGE_PAT and _PAGE_PSE deliberately share bit 7 -- PAT
 * applies to L1 entries, PSE to L2+ entries. */
134 #define _PAGE_PAT 0x080UL
135 #define _PAGE_PSE 0x080UL
136 #define _PAGE_GLOBAL 0x100UL
/* Default protection-bit combinations used when building each table level. */
138 #if defined(__i386__)
139 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
140 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY |_PAGE_USER)
141 #if defined(CONFIG_X86_PAE)
142 #define L3_PROT (_PAGE_PRESENT)
143 #endif /* CONFIG_X86_PAE */
144 #elif defined(__x86_64__)
145 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
146 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
147 #define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
148 #define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
149 #endif /* __i386__ || __x86_64__ */
/* PAGE_SIZE is 64-bit under PAE so address arithmetic using it does not
 * truncate 44-bit physical addresses. */
151 #ifndef CONFIG_X86_PAE
152 #define PAGE_SIZE (1UL << L1_PAGETABLE_SHIFT)
153 #else
154 #define PAGE_SIZE (1ULL << L1_PAGETABLE_SHIFT)
155 #endif
156 #define PAGE_SHIFT L1_PAGETABLE_SHIFT
157 #define PAGE_MASK (~(PAGE_SIZE-1))
/* Byte-count <-> page-frame-number conversions (round up / round down). */
159 #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> L1_PAGETABLE_SHIFT)
160 #define PFN_DOWN(x) ((x) >> L1_PAGETABLE_SHIFT)
161 #define PFN_PHYS(x) ((x) << L1_PAGETABLE_SHIFT)
163 /* to align the pointer to the (next) page boundary */
164 #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
166 /* Definitions for machine and pseudophysical addresses. */
167 #ifdef CONFIG_X86_PAE
168 typedef unsigned long long paddr_t;
169 typedef unsigned long long maddr_t;
170 #else
171 typedef unsigned long paddr_t;
172 typedef unsigned long maddr_t;
173 #endif
/* Guest-pfn -> machine-frame translation table, indexed by pfn. */
175 extern unsigned long *phys_to_machine_mapping;
/* Linker-provided section boundary symbols (addresses, not values). */
176 extern char _text, _etext, _edata, _end;
177 #define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
178 static __inline__ maddr_t phys_to_machine(paddr_t phys)
179 {
180 maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
181 machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
182 return machine;
183 }
185 #define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
186 static __inline__ paddr_t machine_to_phys(maddr_t machine)
187 {
188 paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
189 phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
190 return phys;
191 }
/* Kernel virtual addresses are offset from the start of .text. */
193 #define VIRT_START ((unsigned long)&_text)
/* Virtual <-> pseudophysical conversions (simple offset arithmetic). */
195 #define to_phys(x) ((unsigned long)(x)-VIRT_START)
196 #define to_virt(x) ((void *)((unsigned long)(x)+VIRT_START))
/* Composite conversions between virtual, pfn, mfn and machine addresses. */
198 #define virt_to_pfn(_virt) (PFN_DOWN(to_phys(_virt)))
199 #define virt_to_mfn(_virt) (pfn_to_mfn(virt_to_pfn(_virt)))
200 #define mach_to_virt(_mach) (to_virt(machine_to_phys(_mach)))
201 #define virt_to_mach(_virt) (phys_to_machine(to_phys(_virt)))
202 #define mfn_to_virt(_mfn) (to_virt(mfn_to_pfn(_mfn) << PAGE_SHIFT))
203 #define pfn_to_virt(_pfn) (to_virt((_pfn) << PAGE_SHIFT))
205 /* Pagetable walking. */
/* Extract the machine frame number from a pagetable entry. */
206 #define pte_to_mfn(_pte) (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT)
207 #define pte_to_virt(_pte) to_virt(mfn_to_pfn(pte_to_mfn(_pte)) << PAGE_SHIFT)
/* Page allocator interface: alloc_pages returns the virtual address of
 * 2^order contiguous pages (implementation elsewhere). */
209 void init_mm(void);
210 unsigned long alloc_pages(int order);
211 #define alloc_page() alloc_pages(0)
212 void free_pages(void *pointer, int order);
214 static __inline__ int get_order(unsigned long size)
215 {
216 int order;
217 size = (size-1) >> PAGE_SHIFT;
218 for ( order = 0; size; order++ )
219 size >>= 1;
220 return order;
221 }
/* Map 'n' machine frames (listed in 'f') into virtual address space and
 * return the virtual address of the mapping (implementation elsewhere). */
224 void *map_frames(unsigned long *f, unsigned long n);
226 #endif /* _MM_H_ */