extras/mini-os/include/x86/arch_mm.h @ 17829:cc4e471bbc08 (ia64/xen-unstable)

minios: Fix >4GB machine addresses

Signed-off-by: Samuel Thibault <samuel.thibault@eu.citrix.com>
Author: Keir Fraser <keir.fraser@citrix.com>
Date: Tue Jun 10 16:59:24 2008 +0100

/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*-
 *
 * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
 * Copyright (c) 2005, Keir A Fraser
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#ifndef _ARCH_MM_H_
#define _ARCH_MM_H_

#ifndef __ASSEMBLY__
#include <xen/xen.h>
#if defined(__i386__)
#include <xen/arch-x86_32.h>
#elif defined(__x86_64__)
#include <xen/arch-x86_64.h>
#else
#error "Unsupported architecture"
#endif
#endif /* !__ASSEMBLY__ */
/* Page-table level numbers, as passed around by the mm code. */
#define L1_FRAME 1
#define L2_FRAME 2
#define L3_FRAME 3

#define L1_PAGETABLE_SHIFT 12
#if defined(__i386__)
/* i386 builds use PAE paging: three levels with 64-bit entries. */

#define L2_PAGETABLE_SHIFT 21
#define L3_PAGETABLE_SHIFT 30

#define L1_PAGETABLE_ENTRIES 512
#define L2_PAGETABLE_ENTRIES 512
#define L3_PAGETABLE_ENTRIES 4

#define PADDR_BITS 44
#define PADDR_MASK ((1ULL << PADDR_BITS)-1)

#define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1)

/*
 * If starting from a virtual address greater than 0xc0000000,
 * this value will be 2, to account for the final mid-level page
 * directory which is always mapped in at this location.
 */
#define NOT_L1_FRAMES 3
#define PRIpte "016llx"
#ifndef __ASSEMBLY__
typedef uint64_t pgentry_t;
#endif
#elif defined(__x86_64__)

#define L2_PAGETABLE_SHIFT 21
#define L3_PAGETABLE_SHIFT 30
#define L4_PAGETABLE_SHIFT 39

#define L1_PAGETABLE_ENTRIES 512
#define L2_PAGETABLE_ENTRIES 512
#define L3_PAGETABLE_ENTRIES 512
#define L4_PAGETABLE_ENTRIES 512

/* These are page-table limitations. Current CPUs support only 40-bit phys. */
#define PADDR_BITS 52
#define VADDR_BITS 48
#define PADDR_MASK ((1UL << PADDR_BITS)-1)
#define VADDR_MASK ((1UL << VADDR_BITS)-1)

#define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1)
#define L3_MASK ((1UL << L4_PAGETABLE_SHIFT) - 1)

#define NOT_L1_FRAMES 3
#define PRIpte "016lx"
#ifndef __ASSEMBLY__
typedef unsigned long pgentry_t;
#endif

#endif
#define L1_MASK ((1UL << L2_PAGETABLE_SHIFT) - 1)
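/*
 * Worked values (illustrative, not part of the original header): with the
 * shifts above, L1_MASK is (1UL << 21) - 1 = 0x1fffff, the byte offset
 * within the 2MB region covered by one L2 entry; likewise L2_MASK
 * (0x3fffffff) is the offset within the 1GB region covered by one L3 entry.
 */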
/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(_a) \
    (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
#define l2_table_offset(_a) \
    (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
#define l3_table_offset(_a) \
    (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
#if defined(__x86_64__)
#define l4_table_offset(_a) \
    (((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
#endif
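/*
 * Example (illustrative, not part of the original header): on x86_64 each
 * level index is a 9-bit field above the 12-bit page offset -- bits 47..39
 * select the L4 entry, 38..30 the L3 entry, 29..21 the L2 entry, and
 * 20..12 the L1 entry. A sketch of the decomposition:
 *
 *   unsigned long va = (1UL << 39) | (2UL << 30) | (3UL << 21)
 *                    | (4UL << 12) | 0x123;
 *   l4_table_offset(va);    -> 1
 *   l3_table_offset(va);    -> 2
 *   l2_table_offset(va);    -> 3
 *   l1_table_offset(va);    -> 4
 *   va & ~PAGE_MASK;        -> 0x123, the byte offset within the page
 */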
/*
 * Page-table entry flag bits. Bit 7 is architecturally overloaded: it is
 * PAT in an L1 entry but PSE (superpage) in higher-level entries, hence
 * _PAGE_PAT and _PAGE_PSE share the value 0x080.
 */
#define _PAGE_PRESENT  0x001ULL
#define _PAGE_RW       0x002ULL
#define _PAGE_USER     0x004ULL
#define _PAGE_PWT      0x008ULL
#define _PAGE_PCD      0x010ULL
#define _PAGE_ACCESSED 0x020ULL
#define _PAGE_DIRTY    0x040ULL
#define _PAGE_PAT      0x080ULL
#define _PAGE_PSE      0x080ULL
#define _PAGE_GLOBAL   0x100ULL
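/*
 * Example (illustrative): testing flags on a fetched entry, where "pte" is
 * a hypothetical pgentry_t obtained from a walk such as virtual_to_pte()
 * below:
 *
 *   if ( (pte & _PAGE_PRESENT) && (pte & _PAGE_RW) )
 *       ...;    the mapping is present and writable
 */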
#if defined(__i386__)
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
#define L1_PROT_RO (_PAGE_PRESENT|_PAGE_ACCESSED)
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L3_PROT (_PAGE_PRESENT)
#elif defined(__x86_64__)
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
#define L1_PROT_RO (_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_USER)
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#endif /* __i386__ || __x86_64__ */
#include "arch_limits.h"
#define PAGE_SIZE __PAGE_SIZE
#define PAGE_SHIFT __PAGE_SHIFT
#define PAGE_MASK (~(PAGE_SIZE-1))

#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> L1_PAGETABLE_SHIFT)
#define PFN_DOWN(x) ((x) >> L1_PAGETABLE_SHIFT)
#define PFN_PHYS(x) ((uint64_t)(x) << L1_PAGETABLE_SHIFT)
#define PHYS_PFN(x) ((x) >> L1_PAGETABLE_SHIFT)

/* Align the address up to the (next) page boundary. */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
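/*
 * Worked example (illustrative), assuming the usual 4KB pages
 * (PAGE_SHIFT == 12, PAGE_SIZE == 0x1000):
 *
 *   PFN_UP(0x1234)     -> 2       rounds up to the next whole frame
 *   PFN_DOWN(0x1234)   -> 1       truncates to the containing frame
 *   PFN_PHYS(2)        -> 0x2000  as a uint64_t, so frames above 4GB
 *                                 survive even on 32-bit builds
 *   PAGE_ALIGN(0x1234) -> 0x2000
 */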
#ifndef __ASSEMBLY__
/* Definitions for machine and pseudophysical addresses. */
#ifdef __i386__
typedef unsigned long long paddr_t;
typedef unsigned long long maddr_t;
#else
typedef unsigned long paddr_t;
typedef unsigned long maddr_t;
#endif

extern unsigned long *phys_to_machine_mapping;
extern char _text, _etext, _erodata, _edata, _end;
extern unsigned long mfn_zero;
#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
static __inline__ maddr_t phys_to_machine(paddr_t phys)
{
    maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
    machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
    return machine;
}

#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
static __inline__ paddr_t machine_to_phys(maddr_t machine)
{
    paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
    phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
    return phys;
}
#endif /* !__ASSEMBLY__ */
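/*
 * Example (illustrative): converting between the guest's pseudophysical
 * view and real machine addresses. The frame number is translated through
 * the lookup table, while the offset within the page is carried over
 * unchanged; a round trip should be the identity for any RAM page owned
 * by this domain:
 *
 *   paddr_t pa = 0x100000;             hypothetical guest-physical address
 *   maddr_t ma = phys_to_machine(pa);  machine address of the same byte
 *   machine_to_phys(ma) == pa;         round trip
 */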
#define VIRT_START ((unsigned long)&_text)

#define to_phys(x) ((unsigned long)(x)-VIRT_START)
#define to_virt(x) ((void *)((unsigned long)(x)+VIRT_START))

#define virt_to_pfn(_virt) (PFN_DOWN(to_phys(_virt)))
#define virt_to_mfn(_virt) (pfn_to_mfn(virt_to_pfn(_virt)))
#define mach_to_virt(_mach) (to_virt(machine_to_phys(_mach)))
#define virt_to_mach(_virt) (phys_to_machine(to_phys(_virt)))
#define mfn_to_virt(_mfn) (to_virt(mfn_to_pfn(_mfn) << PAGE_SHIFT))
#define pfn_to_virt(_pfn) (to_virt((_pfn) << PAGE_SHIFT))
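/*
 * Example (illustrative): the kernel image is mapped contiguously starting
 * at VIRT_START, so virtual/pseudophysical conversion is a constant offset
 * and only the pfn->mfn table lookup involves Xen state:
 *
 *   static char buf[4096];                    hypothetical kernel object
 *   unsigned long pfn = virt_to_pfn(buf);     pseudophysical frame holding it
 *   unsigned long mfn = virt_to_mfn(buf);     machine frame backing it
 *   pfn_to_virt(pfn);                         back to that page's base address
 */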
/* Pagetable walking. */
#define pte_to_mfn(_pte) (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT)
#define pte_to_virt(_pte) to_virt(mfn_to_pfn(pte_to_mfn(_pte)) << PAGE_SHIFT)
#define PT_BASE ((pgentry_t *)start_info.pt_base)

#ifdef __x86_64__
#define virtual_to_l3(_virt) ((pgentry_t *)pte_to_virt(PT_BASE[l4_table_offset(_virt)]))
#else
#define virtual_to_l3(_virt) PT_BASE
#endif

#define virtual_to_l2(_virt) ({ \
    unsigned long __virt2 = (_virt); \
    (pgentry_t *) pte_to_virt(virtual_to_l3(__virt2)[l3_table_offset(__virt2)]); \
})

#define virtual_to_l1(_virt) ({ \
    unsigned long __virt1 = (_virt); \
    (pgentry_t *) pte_to_virt(virtual_to_l2(__virt1)[l2_table_offset(__virt1)]); \
})

#define virtual_to_pte(_virt) ({ \
    unsigned long __virt0 = (unsigned long) (_virt); \
    virtual_to_l1(__virt0)[l1_table_offset(__virt0)]; \
})
#define virtual_to_mfn(_virt) pte_to_mfn(virtual_to_pte(_virt))
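/*
 * Example (illustrative): a software walk of the live page tables for a
 * mapped virtual address. Each virtual_to_*() step reads one entry and
 * follows it into the next, lower-level table via pte_to_virt():
 *
 *   unsigned long va  = (unsigned long)&_text;
 *   pgentry_t pte     = virtual_to_pte(va);
 *   unsigned long mfn = pte_to_mfn(pte);      machine frame backing va
 *
 * The walk assumes every intermediate entry is present, so it must only
 * be applied to addresses known to be mapped.
 */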
#define map_frames(f, n) map_frames_ex(f, n, 1, 0, 1, DOMID_SELF, 0, L1_PROT)
#define map_zero(n, a) map_frames_ex(&mfn_zero, n, 0, 0, a, DOMID_SELF, 0, L1_PROT_RO)
#define do_map_zero(start, n) do_map_frames(start, &mfn_zero, n, 0, 0, DOMID_SELF, 0, L1_PROT_RO)
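/*
 * Example (illustrative): map_frames() maps an array of n machine frames
 * at a fresh virtual range with the default writable protection, while
 * map_zero()/do_map_zero() alias every page to the shared zero frame
 * read-only. map_frames_ex() and do_map_frames() are defined elsewhere in
 * mini-os; their extra arguments, as the expansions above suggest, cover
 * stride, increment, alignment, owning domain, a may-fail flag, and the
 * protection bits.
 *
 *   unsigned long mfns[2] = { mfn_a, mfn_b };   hypothetical frame numbers
 *   void *v = map_frames(mfns, 2);              contiguous two-page mapping
 */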
#endif /* _ARCH_MM_H_ */