ia64/xen-unstable

view xen/include/asm-x86/p2m.h @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use d->max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 5848b49b74fc
/******************************************************************************
 * include/asm-x86/p2m.h
 *
 * physical-to-machine mappings for automatically-translated domains.
 *
 * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
 * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _XEN_P2M_H
#define _XEN_P2M_H

#include <xen/config.h>
#include <xen/paging.h>

/*
 * The phys_to_machine_mapping maps guest physical frame numbers
 * to machine frame numbers.  It only exists for paging_mode_translate
 * guests.  It is organised in page-table format, which:
 *
 * (1) allows us to use it directly as the second pagetable in hardware-
 *     assisted paging and (hopefully) iommu support; and
 * (2) lets us map it directly into the guest vcpus' virtual address space
 *     as a linear pagetable, so we can read and write it easily.
 *
 * For (2) we steal the address space that would have normally been used
 * by the read-only MPT map in a non-translated guest.  (For
 * paging_mode_external() guests this mapping is in the monitor table.)
 */
#define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)
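
/*
 * Illustrative sketch only (not part of the real interface): because the
 * p2m is mapped as a linear pagetable at RO_MPT_VIRT_START, a lookup in
 * the current guest's p2m can be little more than an array index into
 * phys_to_machine_mapping[].  The helper below is hypothetical and
 * simplified; the real lookup also copes with faults on unmapped p2m
 * pages and with 2MB superpage entries.
 */
static inline mfn_t example_linear_p2m_lookup(unsigned long gfn)
{
    /* Index the linear p2m mapping directly (assumes the relevant p2m
     * page is present and gfn is below max_mapped_pfn). */
    l1_pgentry_t l1e = phys_to_machine_mapping[gfn];

    if ( l1e_get_flags(l1e) & _PAGE_PRESENT )
        return _mfn(l1e_get_pfn(l1e));
    return _mfn(INVALID_MFN);
}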

/*
 * The upper levels of the p2m pagetable always contain full rights; all
 * variation in the access control bits is made in the level-1 PTEs.
 *
 * In addition to the phys-to-machine translation, each p2m PTE contains
 * *type* information about the gfn it translates, helping Xen to decide
 * on the correct course of action when handling a page-fault to that
 * guest frame.  We store the type in the "available" bits of the PTEs
 * in the table, which gives us 8 possible types on 32-bit systems.
 * Further expansions of the type system will only be supported on
 * 64-bit Xen.
 */
typedef enum {
    p2m_invalid = 0,            /* Nothing mapped here */
    p2m_ram_rw = 1,             /* Normal read/write guest RAM */
    p2m_ram_logdirty = 2,       /* Temporarily read-only for log-dirty */
    p2m_ram_ro = 3,             /* Read-only; writes are silently dropped */
    p2m_mmio_dm = 4,            /* Reads and writes go to the device model */
    p2m_mmio_direct = 5,        /* Read/write mapping of genuine MMIO area */
    p2m_populate_on_demand = 6, /* Place-holder for empty memory */
} p2m_type_t;

typedef enum {
    p2m_query = 0,              /* Do not populate PoD entries */
    p2m_alloc = 1,              /* Automatically populate PoD entries */
    p2m_guest = 2,              /* Guest demand-fault; implies alloc */
} p2m_query_t;

/* We use bitmaps and masks to handle groups of types */
#define p2m_to_mask(_t) (1UL << (_t))

/* RAM types, which map to real machine frames */
#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw)          \
                       | p2m_to_mask(p2m_ram_logdirty)  \
                       | p2m_to_mask(p2m_ram_ro))

/* MMIO types, which don't have to map to anything in the frametable */
#define P2M_MMIO_TYPES (p2m_to_mask(p2m_mmio_dm)        \
                        | p2m_to_mask(p2m_mmio_direct))

/* Read-only types, which must have the _PAGE_RW bit clear in their PTEs */
#define P2M_RO_TYPES (p2m_to_mask(p2m_ram_logdirty)     \
                      | p2m_to_mask(p2m_ram_ro))

#define P2M_MAGIC_TYPES (p2m_to_mask(p2m_populate_on_demand))

/* Useful predicates */
#define p2m_is_ram(_t) (p2m_to_mask(_t) & P2M_RAM_TYPES)
#define p2m_is_mmio(_t) (p2m_to_mask(_t) & P2M_MMIO_TYPES)
#define p2m_is_readonly(_t) (p2m_to_mask(_t) & P2M_RO_TYPES)
#define p2m_is_magic(_t) (p2m_to_mask(_t) & P2M_MAGIC_TYPES)
#define p2m_is_valid(_t) (p2m_to_mask(_t) & (P2M_RAM_TYPES | P2M_MMIO_TYPES))
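
/*
 * Illustrative sketch only: the kind of decision the type predicates
 * support when a guest faults on a p2m-translated frame.  The function
 * name and return codes are hypothetical; the real dispatch lives in the
 * HAP/EPT fault handlers, not in this header.
 */
static inline int example_classify_fault(p2m_type_t t, int is_write)
{
    if ( p2m_is_mmio(t) )
        return 1;   /* forward to the device model or the direct MMIO path */
    if ( is_write && p2m_is_readonly(t) )
        return 2;   /* emulate or drop the write (log-dirty, ram_ro) */
    if ( p2m_is_ram(t) )
        return 0;   /* ordinary RAM: nothing special to do */
    return -1;      /* invalid or magic type: needs populating, or a bug */
}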

/* Populate-on-demand */
#define POPULATE_ON_DEMAND_MFN (1<<9)
#define POD_PAGE_ORDER 9

struct p2m_domain {
    /* Lock that protects updates to the p2m */
    spinlock_t         lock;
    int                locker;   /* processor which holds the lock */
    const char        *locker_function; /* Func that took it */

    /* Pages used to construct the p2m */
    struct page_list_head pages;

    /* Functions to call to get or free pages for the p2m */
    struct page_info * (*alloc_page  )(struct domain *d);
    void               (*free_page   )(struct domain *d,
                                       struct page_info *pg);
    int                (*set_entry   )(struct domain *d, unsigned long gfn,
                                       mfn_t mfn, unsigned int page_order,
                                       p2m_type_t p2mt);
    mfn_t              (*get_entry   )(struct domain *d, unsigned long gfn,
                                       p2m_type_t *p2mt,
                                       p2m_query_t q);
    mfn_t              (*get_entry_current)(unsigned long gfn,
                                            p2m_type_t *p2mt,
                                            p2m_query_t q);
    void               (*change_entry_type_global)(struct domain *d,
                                                   p2m_type_t ot,
                                                   p2m_type_t nt);

    /* Highest guest frame that's ever been mapped in the p2m */
    unsigned long max_mapped_pfn;

    /* Populate-on-demand variables
     * NB on locking.  {super,single,count} are
     * covered by d->page_alloc_lock, since they're almost always used in
     * conjunction with that functionality.  {entry_count} is covered by
     * the domain p2m lock, since it's almost always used in conjunction
     * with changing the p2m tables.
     *
     * At this point, both locks are held in two places.  In both,
     * the order is [p2m,page_alloc]:
     * + p2m_pod_decrease_reservation() calls p2m_pod_cache_add(),
     *   which grabs page_alloc
     * + p2m_pod_demand_populate() grabs both; the p2m lock to avoid
     *   double-demand-populating of pages, the page_alloc lock to
     *   protect moving stuff from the PoD cache to the domain page list.
     *   (See the lock-ordering sketch after this struct.)
     */
    struct {
        struct page_list_head super,   /* List of superpages                */
                         single;       /* Non-super lists                   */
        int              count,        /* # of pages in cache lists         */
                         entry_count;  /* # of pages in p2m marked pod      */
        unsigned         reclaim_super;  /* Last gpfn of a scan */
        unsigned         reclaim_single; /* Last gpfn of a scan */
        unsigned         max_guest;    /* gpfn of max guest demand-populate */
    } pod;
};
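
/*
 * Illustrative sketch of the lock ordering documented above (hypothetical
 * helpers, not part of this header): whenever both locks are needed, the
 * p2m lock is taken before the domain's page_alloc lock, and they are
 * released in the reverse order.
 */
static inline void example_pod_lock_both(struct domain *d)
{
    spin_lock(&d->arch.p2m->lock);      /* 1st: the p2m lock */
    spin_lock(&d->page_alloc_lock);     /* 2nd: page_alloc   */
}

static inline void example_pod_unlock_both(struct domain *d)
{
    spin_unlock(&d->page_alloc_lock);
    spin_unlock(&d->arch.p2m->lock);
}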

/* Extract the type from the PTE flags that store it */
static inline p2m_type_t p2m_flags_to_type(unsigned long flags)
{
    /* Type is stored in the "available" bits, 9, 10 and 11 */
    return (flags >> 9) & 0x7;
}

/* Read the current domain's p2m table.  Do not populate PoD pages. */
static inline mfn_t gfn_to_mfn_type_current(unsigned long gfn, p2m_type_t *t,
                                            p2m_query_t q)
{
    return current->domain->arch.p2m->get_entry_current(gfn, t, q);
}

/* Read another domain's P2M table, mapping pages as we go.
 * Do not populate PoD pages. */
static inline
mfn_t gfn_to_mfn_type_foreign(struct domain *d, unsigned long gfn, p2m_type_t *t,
                              p2m_query_t q)
{
    return d->arch.p2m->get_entry(d, gfn, t, q);
}

/* General conversion function from gfn to mfn */
static inline mfn_t _gfn_to_mfn_type(struct domain *d,
                                     unsigned long gfn, p2m_type_t *t,
                                     p2m_query_t q)
{
    if ( !paging_mode_translate(d) )
    {
        /* Not necessarily true, but for non-translated guests, we claim
         * it's the most generic kind of memory */
        *t = p2m_ram_rw;
        return _mfn(gfn);
    }
    if ( likely(current->domain == d) )
        return gfn_to_mfn_type_current(gfn, t, q);
    else
        return gfn_to_mfn_type_foreign(d, gfn, t, q);
}

#define gfn_to_mfn(d, g, t) _gfn_to_mfn_type((d), (g), (t), p2m_alloc)
#define gfn_to_mfn_query(d, g, t) _gfn_to_mfn_type((d), (g), (t), p2m_query)
#define gfn_to_mfn_guest(d, g, t) _gfn_to_mfn_type((d), (g), (t), p2m_guest)

#define gfn_to_mfn_current(g, t) gfn_to_mfn_type_current((g), (t), p2m_alloc)
#define gfn_to_mfn_foreign(d, g, t) gfn_to_mfn_type_foreign((d), (g), (t), p2m_alloc)
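
/*
 * Illustrative usage only (hypothetical caller): gfn_to_mfn_query() is the
 * variant to use when merely inspecting the p2m, since p2m_query will not
 * allocate backing frames for populate-on-demand entries; gfn_to_mfn()
 * (p2m_alloc) may.
 */
static inline int example_gfn_is_backed_ram(struct domain *d, unsigned long gfn)
{
    p2m_type_t t;
    mfn_t mfn = gfn_to_mfn_query(d, gfn, &t);   /* no PoD side effects */

    return p2m_is_ram(t) && (mfn_x(mfn) != INVALID_MFN);
}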

/* Compatibility function exporting the old untyped interface */
static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
{
    mfn_t mfn;
    p2m_type_t t;
    mfn = gfn_to_mfn(d, gpfn, &t);
    if ( p2m_is_valid(t) )
        return mfn_x(mfn);
    return INVALID_MFN;
}

/* General conversion function from mfn to gfn */
static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
{
    if ( paging_mode_translate(d) )
        return get_gpfn_from_mfn(mfn_x(mfn));
    else
        return mfn_x(mfn);
}

/* Translate the frame number held in an l1e from guest to machine */
static inline l1_pgentry_t
gl1e_to_ml1e(struct domain *d, l1_pgentry_t l1e)
{
    if ( unlikely(paging_mode_translate(d)) )
        l1e = l1e_from_pfn(gmfn_to_mfn(d, l1e_get_pfn(l1e)),
                           l1e_get_flags(l1e));
    return l1e;
}

/* Init the datastructures for later use by the p2m code */
int p2m_init(struct domain *d);

/* Allocate a new p2m table for a domain.
 *
 * The alloc_page and free_page functions will be used to get memory to
 * build the p2m, and to release it again at the end of day.
 *
 * Returns 0 for success or -errno. */
int p2m_alloc_table(struct domain *d,
                    struct page_info * (*alloc_page)(struct domain *d),
                    void (*free_page)(struct domain *d, struct page_info *pg));
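
/*
 * Illustrative sketch only: the shape of the allocator callbacks a paging
 * mode might hand to p2m_alloc_table().  These helpers are hypothetical
 * (and would need xen/mm.h for the domheap calls); the real callbacks live
 * in the HAP/shadow code and draw from per-domain paging pools rather than
 * anonymous domheap pages.
 */
static inline struct page_info *example_alloc_p2m_page(struct domain *d)
{
    /* Anonymous domheap page; a real callback also accounts it to d. */
    return alloc_domheap_page(NULL, 0);
}

static inline void example_free_p2m_page(struct domain *d, struct page_info *pg)
{
    free_domheap_page(pg);
}

/* A caller would then wire them up as:
 *     rc = p2m_alloc_table(d, example_alloc_p2m_page, example_free_p2m_page);
 */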

/* Return all the p2m resources to Xen. */
void p2m_teardown(struct domain *d);
void p2m_final_teardown(struct domain *d);

/* Dump PoD information about the domain */
void p2m_pod_dump_data(struct domain *d);

/* Move all pages from the populate-on-demand cache to the domain page_list
 * (usually in preparation for domain destruction) */
void p2m_pod_empty_cache(struct domain *d);

/* Set populate-on-demand cache size so that the total memory allocated to a
 * domain matches target */
int p2m_pod_set_mem_target(struct domain *d, unsigned long target);

/* Call when decreasing memory reservation to handle PoD entries properly.
 * Will return '1' if all entries were handled and nothing more need be done.*/
int
p2m_pod_decrease_reservation(struct domain *d,
                             xen_pfn_t gpfn,
                             unsigned int order);
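
/*
 * Illustrative usage only (hypothetical caller in a decrease-reservation
 * path): if p2m_pod_decrease_reservation() reports that every affected
 * entry was PoD, there are no backing frames left to release and the
 * caller can stop early.
 */
static inline int example_decrease_reservation(struct domain *d,
                                               xen_pfn_t gpfn,
                                               unsigned int order)
{
    if ( p2m_pod_decrease_reservation(d, gpfn, order) )
        return 1;   /* all entries were PoD; nothing further to free */

    /* Otherwise the caller releases the real pages backing the range. */
    return 0;
}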

/* Add a page to a domain's p2m table */
int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
                            unsigned long mfn, unsigned int page_order,
                            p2m_type_t t);

/* Set a p2m range as populate-on-demand */
int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
                                          unsigned int order);

/* Untyped version for RAM only, for compatibility
 *
 * Return 0 for success
 */
static inline int guest_physmap_add_page(struct domain *d, unsigned long gfn,
                                         unsigned long mfn,
                                         unsigned int page_order)
{
    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
}

/* Remove a page from a domain's p2m table */
void guest_physmap_remove_page(struct domain *d, unsigned long gfn,
                               unsigned long mfn, unsigned int page_order);

/* Change types across all p2m entries in a domain */
void p2m_change_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt);
void p2m_change_entry_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt);

/* Compare-exchange the type of a single p2m entry */
p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
                           p2m_type_t ot, p2m_type_t nt);

/* Set mmio addresses in the p2m table (for pass-through) */
int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);

#endif /* _XEN_P2M_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */