ia64/xen-unstable

view xen/include/asm-x86/p2m.h @ 15905:45dbef0ab7a6

[XEN] Fix assert in typed p2m code, as spotted by GCC 4's enthusiastic warnings.

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
author:    Tim Deegan <Tim.Deegan@xensource.com>
date:      Wed Sep 12 09:58:16 2007 +0100
parents:   4633e9604da9
children:  a79d2c043643

/******************************************************************************
 * include/asm-x86/p2m.h
 *
 * physical-to-machine mappings for automatically-translated domains.
 *
 * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
 * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _XEN_P2M_H
#define _XEN_P2M_H

/*
 * The phys_to_machine_mapping maps guest physical frame numbers
 * to machine frame numbers.  It only exists for paging_mode_translate
 * guests.  It is organised in page-table format, which:
 *
 * (1) allows us to use it directly as the second pagetable in hardware-
 *     assisted paging and (hopefully) iommu support; and
 * (2) lets us map it directly into the guest vcpus' virtual address space
 *     as a linear pagetable, so we can read and write it easily.
 *
 * For (2) we steal the address space that would have normally been used
 * by the read-only MPT map in a non-translated guest.  (For
 * paging_mode_external() guests this mapping is in the monitor table.)
 */
#define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)
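
/*
 * Illustrative sketch (hypothetical helper, not part of this interface):
 * because the p2m is mapped as a linear pagetable at RO_MPT_VIRT_START,
 * the level-1 entry translating gfn 'g' is simply
 * phys_to_machine_mapping[g], and the region can hold
 * (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(l1_pgentry_t) entries,
 * which is the bound gfn_to_mfn_current() below asserts.  Note the real
 * reader uses __copy_from_user() instead, since the table is sparse and
 * a direct access may fault.
 */
#if 0 /* example only */
static inline l1_pgentry_t p2m_linear_entry(unsigned long gfn)
{
    ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START)
                 / sizeof(l1_pgentry_t));
    return phys_to_machine_mapping[gfn]; /* may fault on unmapped parts */
}
#endif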

/*
 * The upper levels of the p2m pagetable always contain full rights; all
 * variation in the access control bits is made in the level-1 PTEs.
 *
 * In addition to the phys-to-machine translation, each p2m PTE contains
 * *type* information about the gfn it translates, helping Xen to decide
 * on the correct course of action when handling a page-fault to that
 * guest frame.  We store the type in the "available" bits of the PTEs
 * in the table, which gives us 8 possible types on 32-bit systems.
 * Further expansions of the type system will only be supported on
 * 64-bit Xen.
 */
typedef enum {
    p2m_invalid = 0,        /* Nothing mapped here */
    p2m_ram_rw = 1,         /* Normal read/write guest RAM */
    p2m_ram_logdirty = 2,   /* Temporarily read-only for log-dirty */
    p2m_ram_ro = 3,         /* Read-only; writes go to the device model */
    p2m_mmio_dm = 4,        /* Reads and writes go to the device model */
    p2m_mmio_direct = 5,    /* Read/write mapping of genuine MMIO area */
} p2m_type_t;

/* We use bitmaps and masks to handle groups of types */
#define p2m_to_mask(_t) (1UL << (_t))

/* RAM types, which map to real machine frames */
#define P2M_RAM_TYPES   (p2m_to_mask(p2m_ram_rw)         \
                         | p2m_to_mask(p2m_ram_logdirty) \
                         | p2m_to_mask(p2m_ram_ro))

/* MMIO types, which don't have to map to anything in the frametable */
#define P2M_MMIO_TYPES  (p2m_to_mask(p2m_mmio_dm)        \
                         | p2m_to_mask(p2m_mmio_direct))

/* Read-only types, which must have the _PAGE_RW bit clear in their PTEs */
#define P2M_RO_TYPES    (p2m_to_mask(p2m_ram_logdirty)   \
                         | p2m_to_mask(p2m_ram_ro))

/* Useful predicates */
#define p2m_is_ram(_t)      (p2m_to_mask(_t) & P2M_RAM_TYPES)
#define p2m_is_mmio(_t)     (p2m_to_mask(_t) & P2M_MMIO_TYPES)
#define p2m_is_readonly(_t) (p2m_to_mask(_t) & P2M_RO_TYPES)
#define p2m_is_valid(_t)    (p2m_to_mask(_t) & (P2M_RAM_TYPES | P2M_MMIO_TYPES))
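
/*
 * Illustrative usage (hypothetical handler, not part of this interface):
 * the predicates above let fault-handling code dispatch on what kind of
 * thing a gfn is without open-coding lists of types.  Note the read-only
 * check must come before the plain-RAM check, since the read-only types
 * are themselves RAM types.
 */
#if 0 /* example only */
static void example_dispatch_on_type(p2m_type_t t)
{
    if ( p2m_is_mmio(t) )
        ;   /* hand the access to the device model or direct MMIO path */
    else if ( p2m_is_readonly(t) )
        ;   /* a write here must be emulated or logged, never retried */
    else if ( p2m_is_ram(t) )
        ;   /* ordinary RAM: safe to map and access the frame */
    else
        ;   /* p2m_invalid: nothing mapped at this gfn */
}
#endif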

/* Extract the type from the PTE flags that store it */
static inline p2m_type_t p2m_flags_to_type(unsigned long flags)
{
    /* Type is stored in the "available" bits, 9, 10 and 11 */
    return (flags >> 9) & 0x7;
}
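
/*
 * The inverse encoding, packing a type into the available bits when a p2m
 * PTE is built, lives in the p2m implementation rather than here; a minimal
 * sketch (hypothetical name, assuming the same 3-bit layout and ignoring
 * the access-control bits the real builder must also set) would be:
 */
#if 0 /* example only */
static inline unsigned long p2m_type_to_flags(p2m_type_t t)
{
    /* Mirror of p2m_flags_to_type(): type lives in PTE bits 9-11 */
    return ((unsigned long)t & 0x7) << 9;
}
#endif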

/* Read the current domain's p2m table (through the linear mapping). */
static inline mfn_t gfn_to_mfn_current(unsigned long gfn, p2m_type_t *t)
{
    mfn_t mfn = _mfn(INVALID_MFN);
    p2m_type_t p2mt = p2m_mmio_dm;
    /* XXX This is for compatibility with the old model, where anything not
     * XXX marked as RAM was considered to be emulated MMIO space.
     * XXX Once we start explicitly registering MMIO regions in the p2m
     * XXX we will return p2m_invalid for unmapped gfns */

    if ( gfn <= current->domain->arch.p2m.max_mapped_pfn )
    {
        l1_pgentry_t l1e = l1e_empty();
        int ret;

        ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START)
               / sizeof(l1_pgentry_t));

        /* Need to __copy_from_user because the p2m is sparse and this
         * part might not exist */
        ret = __copy_from_user(&l1e,
                               &phys_to_machine_mapping[gfn],
                               sizeof(l1e));

        if ( ret == 0 ) {
            p2mt = p2m_flags_to_type(l1e_get_flags(l1e));
            ASSERT(l1e_get_pfn(l1e) != INVALID_MFN || !p2m_is_ram(p2mt));
            if ( p2m_is_valid(p2mt) )
                mfn = _mfn(l1e_get_pfn(l1e));
            else
                /* XXX see above */
                p2mt = p2m_mmio_dm;
        }
    }

    *t = p2mt;
    return mfn;
}

/* Read another domain's P2M table, mapping pages as we go */
mfn_t gfn_to_mfn_foreign(struct domain *d, unsigned long gfn, p2m_type_t *t);

/* General conversion function from gfn to mfn */
#define gfn_to_mfn(d, g, t) _gfn_to_mfn((d), (g), (t))
static inline mfn_t _gfn_to_mfn(struct domain *d,
                                unsigned long gfn, p2m_type_t *t)
{
    if ( !paging_mode_translate(d) )
    {
        /* Not necessarily true, but for non-translated guests, we claim
         * it's the most generic kind of memory */
        *t = p2m_ram_rw;
        return _mfn(gfn);
    }
    if ( likely(current->domain == d) )
        return gfn_to_mfn_current(gfn, t);
    else
        return gfn_to_mfn_foreign(d, gfn, t);
}
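
/*
 * Illustrative usage (hypothetical caller): the usual pattern is to
 * translate and then check the returned type before trusting the mfn,
 * since MMIO and invalid entries carry no usable machine frame.
 */
#if 0 /* example only */
static int example_get_ram_mfn(struct domain *d, unsigned long gfn,
                               mfn_t *mfn)
{
    p2m_type_t t;

    *mfn = gfn_to_mfn(d, gfn, &t);
    if ( !p2m_is_ram(t) )
        return -EINVAL;  /* MMIO or unmapped: no frame to hand back */
    return 0;
}
#endif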

/* Compatibility function exporting the old untyped interface */
static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
{
    mfn_t mfn;
    p2m_type_t t;
    mfn = gfn_to_mfn(d, gpfn, &t);
    if ( p2m_is_valid(t) )
        return mfn_x(mfn);
    return INVALID_MFN;
}

/* General conversion function from mfn to gfn */
static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
{
    if ( paging_mode_translate(d) )
        return get_gpfn_from_mfn(mfn_x(mfn));
    else
        return mfn_x(mfn);
}

/* Translate the frame number held in an l1e from guest to machine */
static inline l1_pgentry_t
gl1e_to_ml1e(struct domain *d, l1_pgentry_t l1e)
{
    if ( unlikely(paging_mode_translate(d)) )
        l1e = l1e_from_pfn(gmfn_to_mfn(d, l1e_get_pfn(l1e)),
                           l1e_get_flags(l1e));
    return l1e;
}

/* Init the data structures for later use by the p2m code */
void p2m_init(struct domain *d);

/* Allocate a new p2m table for a domain.
 *
 * The alloc_page and free_page functions will be used to get memory to
 * build the p2m, and to release it again at the end of the day.
 *
 * Returns 0 for success or -errno. */
int p2m_alloc_table(struct domain *d,
                    struct page_info * (*alloc_page)(struct domain *d),
                    void (*free_page)(struct domain *d, struct page_info *pg));
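
/*
 * Illustrative sketch (hypothetical callbacks): the paging implementation
 * that owns the domain supplies its own allocator pair, e.g. one drawing
 * from a per-domain pool.  The simplest possible version just uses the
 * domheap directly:
 */
#if 0 /* example only */
static struct page_info *example_alloc_p2m_page(struct domain *d)
{
    return alloc_domheap_page(NULL);    /* anonymous Xen-owned page */
}

static void example_free_p2m_page(struct domain *d, struct page_info *pg)
{
    free_domheap_page(pg);
}

/* ... p2m_alloc_table(d, example_alloc_p2m_page, example_free_p2m_page); */
#endif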

/* Return all the p2m resources to Xen. */
void p2m_teardown(struct domain *d);

/* Add a page to a domain's p2m table */
void guest_physmap_add_page(struct domain *d, unsigned long gfn,
                            unsigned long mfn);

/* Remove a page from a domain's p2m table */
void guest_physmap_remove_page(struct domain *d, unsigned long gfn,
                               unsigned long mfn);
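
/*
 * Illustrative usage (hypothetical helper): populate and balloon paths for
 * a translated guest wire frames in and out of the p2m with the pair above;
 * an exchange-style operation combines them.
 */
#if 0 /* example only */
static void example_replace_frame(struct domain *d, unsigned long gfn,
                                  unsigned long old_mfn,
                                  unsigned long new_mfn)
{
    /* Unhook the old mfn, then map the new one at the same gfn */
    guest_physmap_remove_page(d, gfn, old_mfn);
    guest_physmap_add_page(d, gfn, new_mfn);
}
#endif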

/* Change types across all p2m entries in a domain */
void p2m_change_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt);

/* Compare-exchange the type of a single p2m entry */
p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
                           p2m_type_t ot, p2m_type_t nt);
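
/*
 * Illustrative usage (hedged sketch): log-dirty tracking can be built on
 * these two calls, demoting all RAM when tracking starts and promoting a
 * frame back on its first write fault; the compare-exchange semantics make
 * the promotion safe against racing type changes.
 */
#if 0 /* example only */
static void example_logdirty_enable(struct domain *d)
{
    /* Every read/write RAM entry becomes temporarily read-only */
    p2m_change_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
}

static void example_logdirty_write_fault(struct domain *d, unsigned long gfn)
{
    /* Only flips the entry back if it is still marked logdirty */
    p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
}
#endif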

#endif /* _XEN_P2M_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */