
xen/include/asm-x86/shadow.h @ 12599:93e657836d07

[XEN] Remove VALID_MFN(); replace uses with mfn_valid().
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Mon Nov 27 17:48:24 2006 +0000 (2006-11-27)
parents 2fd223c64fc6
children b4baf35cff11

/******************************************************************************
 * include/asm-x86/shadow.h
 *
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _XEN_SHADOW_H
#define _XEN_SHADOW_H

#include <public/domctl.h>
#include <xen/sched.h>
#include <xen/perfc.h>
#include <xen/domain_page.h>
#include <asm/flushtlb.h>

/* How to make sure a page is not referred to in a shadow PT */
/* This will need to be a for_each_vcpu if we go to per-vcpu shadows */
#define shadow_drop_references(_d, _p)                      \
    shadow_remove_all_mappings((_d)->vcpu[0], _mfn(page_to_mfn(_p)))
#define shadow_sync_and_drop_references(_d, _p)             \
    shadow_remove_all_mappings((_d)->vcpu[0], _mfn(page_to_mfn(_p)))

/* How to add and remove entries in the p2m mapping. */
#define guest_physmap_add_page(_d, _p, _m)                  \
    shadow_guest_physmap_add_page((_d), (_p), (_m))
#define guest_physmap_remove_page(_d, _p, _m)               \
    shadow_guest_physmap_remove_page((_d), (_p), (_m))

/* Shadow PT operation mode : shadow-mode variable in arch_domain. */

#define SHM2_shift 10
/* We're in one of the shadow modes */
#define SHM2_enable    (1U << SHM2_shift)
/* Refcounts based on shadow tables instead of guest tables */
#define SHM2_refcounts (XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT << SHM2_shift)
/* Enable log dirty mode */
#define SHM2_log_dirty (XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY << SHM2_shift)
/* Xen does p2m translation, not guest */
#define SHM2_translate (XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE << SHM2_shift)
/* Xen does not steal address space from the domain for its own bookkeeping;
 * requires VT or similar mechanisms */
#define SHM2_external  (XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL << SHM2_shift)

#define shadow_mode_enabled(_d)   ((_d)->arch.shadow.mode)
#define shadow_mode_refcounts(_d) ((_d)->arch.shadow.mode & SHM2_refcounts)
#define shadow_mode_log_dirty(_d) ((_d)->arch.shadow.mode & SHM2_log_dirty)
#define shadow_mode_translate(_d) ((_d)->arch.shadow.mode & SHM2_translate)
#define shadow_mode_external(_d)  ((_d)->arch.shadow.mode & SHM2_external)
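
/* Illustrative sketch (not part of the interface): how a caller might combine
 * the predicates above.  The helper name is made up for this example; a
 * fully-virtualized (HVM-style) domain is typically run with refcounted,
 * translated, external shadows in addition to SHM2_enable. */
#if 0
static inline int example_is_full_shadow(struct domain *d)
{
    return shadow_mode_enabled(d)
        && shadow_mode_refcounts(d)
        && shadow_mode_translate(d)
        && shadow_mode_external(d);
}
#endif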

/* Xen traps & emulates all reads of all page table pages:
 * not yet supported
 */
#define shadow_mode_trap_reads(_d) ({ (void)(_d); 0; })

// How do we tell that we have a 32-bit PV guest in a 64-bit Xen?
#ifdef __x86_64__
#define pv_32bit_guest(_v) 0 // not yet supported
#else
#define pv_32bit_guest(_v) (!is_hvm_vcpu(_v))
#endif

/* The shadow lock.
 *
 * This lock is per-domain.  It is intended to allow us to make atomic
 * updates to the software TLB that the shadow tables provide.
 *
 * Specifically, it protects:
 *   - all changes to shadow page table pages
 *   - the shadow hash table
 *   - the shadow page allocator
 *   - all changes to guest page table pages; if/when the notion of
 *     out-of-sync pages is added to this code, then the shadow lock is
 *     protecting all guest page table pages which are not currently listed
 *     as both guest-writable and out-of-sync...
 *     XXX -- need to think about this relative to writable page tables.
 *   - all changes to the page_info->tlbflush_timestamp
 *   - the page_info->count fields on shadow pages
 *   - the shadow dirty bit array and count
 *   - XXX
 */
#ifndef CONFIG_SMP
#error shadow.h currently requires CONFIG_SMP
#endif

#define shadow_lock_init(_d)                            \
    do {                                                \
        spin_lock_init(&(_d)->arch.shadow.lock);        \
        (_d)->arch.shadow.locker = -1;                  \
        (_d)->arch.shadow.locker_function = "nobody";   \
    } while (0)

#define shadow_lock_is_acquired(_d) \
    (current->processor == (_d)->arch.shadow.locker)

#define shadow_lock(_d)                                                  \
    do {                                                                 \
        if ( unlikely((_d)->arch.shadow.locker == current->processor) ) \
        {                                                                \
            printk("Error: shadow lock held by %s\n",                   \
                   (_d)->arch.shadow.locker_function);                  \
            BUG();                                                       \
        }                                                                \
        spin_lock(&(_d)->arch.shadow.lock);                              \
        ASSERT((_d)->arch.shadow.locker == -1);                          \
        (_d)->arch.shadow.locker = current->processor;                   \
        (_d)->arch.shadow.locker_function = __func__;                    \
    } while (0)

#define shadow_unlock(_d)                                        \
    do {                                                         \
        ASSERT((_d)->arch.shadow.locker == current->processor);  \
        (_d)->arch.shadow.locker = -1;                           \
        (_d)->arch.shadow.locker_function = "nobody";            \
        spin_unlock(&(_d)->arch.shadow.lock);                    \
    } while (0)
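
/* Illustrative sketch of the usual locking pattern for code that modifies
 * shadow state.  The function name and the elided body are placeholders,
 * not part of this header. */
#if 0
static void example_touch_shadows(struct domain *d)
{
    shadow_lock(d);       /* take the per-domain shadow lock */
    /* ... update shadow pagetables, hash table, dirty bitmap, etc. ... */
    shadow_unlock(d);     /* release before returning */
}
#endif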

/*
 * Levels of self-test and paranoia
 * XXX should go in config files somewhere?
 */
#define SHADOW_AUDIT_HASH           0x01  /* Check current hash bucket */
#define SHADOW_AUDIT_HASH_FULL      0x02  /* Check every hash bucket */
#define SHADOW_AUDIT_ENTRIES        0x04  /* Check this walk's shadows */
#define SHADOW_AUDIT_ENTRIES_FULL   0x08  /* Check every shadow */
#define SHADOW_AUDIT_ENTRIES_MFNS   0x10  /* Check gfn-mfn map in shadows */
#define SHADOW_AUDIT_P2M            0x20  /* Check the p2m table */

#ifdef NDEBUG
#define SHADOW_AUDIT                   0
#define SHADOW_AUDIT_ENABLE            0
#else
#define SHADOW_AUDIT                0x15  /* Basic audit of all except p2m. */
#define SHADOW_AUDIT_ENABLE         shadow_audit_enable
extern int shadow_audit_enable;
#endif

/*
 * Levels of optimization
 * XXX should go in config files somewhere?
 */
#define SHOPT_WRITABLE_HEURISTIC  0x01  /* Guess at RW PTEs via linear maps */
#define SHOPT_EARLY_UNSHADOW      0x02  /* Unshadow l1s on fork or exit */
#define SHOPT_FAST_FAULT_PATH     0x04  /* Fast-path MMIO and not-present */
#define SHOPT_PREFETCH            0x08  /* Shadow multiple entries per fault */
#define SHOPT_LINUX_L3_TOPLEVEL   0x10  /* Pin l3es on early 64bit linux */

#define SHADOW_OPTIMIZATIONS      0x1f

/* With shadow pagetables, the different kinds of address start
 * to get confusing.
 *
 * Virtual addresses are what they usually are: the addresses that are used
 * to access memory while the guest is running.  The MMU translates from
 * virtual addresses to machine addresses.
 *
 * (Pseudo-)physical addresses are the abstraction of physical memory the
 * guest uses for allocation and so forth.  For the purposes of this code,
 * we can largely ignore them.
 *
 * Guest frame numbers (gfns) are the entries that the guest puts in its
 * pagetables.  For normal paravirtual guests, they are actual frame numbers,
 * with the translation done by the guest.
 *
 * Machine frame numbers (mfns) are the entries that the hypervisor puts
 * in the shadow page tables.
 *
 * Elsewhere in the xen code base, the name "gmfn" is generally used to refer
 * to a "machine frame number, from the guest's perspective", or in other
 * words, pseudo-physical frame numbers.  However, in the shadow code, the
 * term "gmfn" means "the mfn of a guest page"; this combines naturally with
 * other terms such as "smfn" (the mfn of a shadow page), gl2mfn (the mfn of a
 * guest L2 page), etc...
 */

/* With this defined, we do some ugly things to force the compiler to
 * give us type safety between mfns and gfns and other integers.
 * TYPE_SAFE(int foo) defines a foo_t, and _foo() and foo_x() functions
 * that translate between int and foo_t.
 *
 * It does have some performance cost because the types now have
 * a different storage attribute, so we may not want it on all the time. */
#ifndef NDEBUG
#define TYPE_SAFETY 1
#endif

#ifdef TYPE_SAFETY
#define TYPE_SAFE(_type,_name)                                           \
typedef struct { _type _name; } _name##_t;                               \
static inline _name##_t _##_name(_type n) { return (_name##_t) { n }; }  \
static inline _type _name##_x(_name##_t n) { return n._name; }
#else
#define TYPE_SAFE(_type,_name)                                           \
typedef _type _name##_t;                                                 \
static inline _name##_t _##_name(_type n) { return n; }                  \
static inline _type _name##_x(_name##_t n) { return n; }
#endif

TYPE_SAFE(unsigned long,mfn)
#define SH_PRI_mfn "05lx"
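
/* Illustrative sketch of wrapping and unwrapping an mfn_t and printing it
 * with the format string above.  The function name and the frame number
 * 0x1234 are arbitrary placeholders for this example. */
#if 0
static void example_mfn_typing(void)
{
    mfn_t m = _mfn(0x1234UL);               /* raw integer -> typed mfn */
    unsigned long raw = mfn_x(m);           /* typed mfn -> raw integer */
    printk("mfn %" SH_PRI_mfn "\n", raw);   /* prints "mfn 01234" */
}
#endif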

static inline int
valid_mfn(mfn_t m)
{
    return mfn_valid(mfn_x(m));
}

static inline mfn_t
pagetable_get_mfn(pagetable_t pt)
{
    return _mfn(pagetable_get_pfn(pt));
}

static inline pagetable_t
pagetable_from_mfn(mfn_t mfn)
{
    return pagetable_from_pfn(mfn_x(mfn));
}

static inline int
shadow_vcpu_mode_translate(struct vcpu *v)
{
    // Returns true if this VCPU needs to be using the P2M table to translate
    // between GFNs and MFNs.
    //
    // This is true of translated HVM domains on a vcpu which has paging
    // enabled.  (HVM vcpus with paging disabled are using the p2m table as
    // their paging table, so no translation occurs in this case.)
    //
    // It is also true for translated PV domains.
    //
    return v->arch.shadow.translate_enabled;
}

/**************************************************************************/
/* Mode-specific entry points into the shadow code */

struct x86_emulate_ctxt;
struct shadow_paging_mode {
    int           (*page_fault            )(struct vcpu *v, unsigned long va,
                                            struct cpu_user_regs *regs);
    int           (*invlpg                )(struct vcpu *v, unsigned long va);
    paddr_t       (*gva_to_gpa            )(struct vcpu *v, unsigned long va);
    unsigned long (*gva_to_gfn            )(struct vcpu *v, unsigned long va);
    void          (*update_cr3            )(struct vcpu *v);
    int           (*map_and_validate_gl1e )(struct vcpu *v, mfn_t gmfn,
                                            void *new_guest_entry, u32 size);
    int           (*map_and_validate_gl2e )(struct vcpu *v, mfn_t gmfn,
                                            void *new_guest_entry, u32 size);
    int           (*map_and_validate_gl2he)(struct vcpu *v, mfn_t gmfn,
                                            void *new_guest_entry, u32 size);
    int           (*map_and_validate_gl3e )(struct vcpu *v, mfn_t gmfn,
                                            void *new_guest_entry, u32 size);
    int           (*map_and_validate_gl4e )(struct vcpu *v, mfn_t gmfn,
                                            void *new_guest_entry, u32 size);
    void          (*detach_old_tables     )(struct vcpu *v);
    int           (*x86_emulate_write     )(struct vcpu *v, unsigned long va,
                                            void *src, u32 bytes,
                                            struct x86_emulate_ctxt *ctxt);
    int           (*x86_emulate_cmpxchg   )(struct vcpu *v, unsigned long va,
                                            unsigned long old,
                                            unsigned long new,
                                            unsigned int bytes,
                                            struct x86_emulate_ctxt *ctxt);
    int           (*x86_emulate_cmpxchg8b )(struct vcpu *v, unsigned long va,
                                            unsigned long old_lo,
                                            unsigned long old_hi,
                                            unsigned long new_lo,
                                            unsigned long new_hi,
                                            struct x86_emulate_ctxt *ctxt);
    mfn_t         (*make_monitor_table    )(struct vcpu *v);
    void          (*destroy_monitor_table )(struct vcpu *v, mfn_t mmfn);
    void *        (*guest_map_l1e         )(struct vcpu *v, unsigned long va,
                                            unsigned long *gl1mfn);
    void          (*guest_get_eff_l1e     )(struct vcpu *v, unsigned long va,
                                            void *eff_l1e);
#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
    int           (*guess_wrmap           )(struct vcpu *v,
                                            unsigned long vaddr, mfn_t gmfn);
#endif
    /* For outsiders to tell what mode we're in */
    unsigned int shadow_levels;
    unsigned int guest_levels;
};

static inline int shadow_guest_paging_levels(struct vcpu *v)
{
    ASSERT(v->arch.shadow.mode != NULL);
    return v->arch.shadow.mode->guest_levels;
}

/**************************************************************************/
/* Entry points into the shadow code */

/* Enable arbitrary shadow mode. */
int shadow_enable(struct domain *d, u32 mode);

/* Turning on shadow test mode */
int shadow_test_enable(struct domain *d);

/* Handler for shadow control ops: enabling and disabling shadow modes,
 * and log-dirty bitmap ops all happen through here. */
int shadow_domctl(struct domain *d,
                  xen_domctl_shadow_op_t *sc,
                  XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);

/* Call when destroying a domain */
void shadow_teardown(struct domain *d);

/* Call once all of the references to the domain have gone away */
void shadow_final_teardown(struct domain *d);

/* Mark a page as dirty in the bitmap */
void sh_do_mark_dirty(struct domain *d, mfn_t gmfn);
static inline void mark_dirty(struct domain *d, unsigned long gmfn)
{
    if ( likely(!shadow_mode_log_dirty(d)) )
        return;

    shadow_lock(d);
    sh_do_mark_dirty(d, _mfn(gmfn));
    shadow_unlock(d);
}

/* Internal version, for when the shadow lock is already held */
static inline void sh_mark_dirty(struct domain *d, mfn_t gmfn)
{
    ASSERT(shadow_lock_is_acquired(d));
    if ( unlikely(shadow_mode_log_dirty(d)) )
        sh_do_mark_dirty(d, gmfn);
}
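
/* Illustrative sketch: a code path that has just written to a guest frame
 * could log the write like this.  The function name is a placeholder; use
 * sh_mark_dirty() instead if the shadow lock is already held. */
#if 0
static void example_log_write(struct domain *d, unsigned long mfn)
{
    /* No-op unless the domain is in log-dirty mode. */
    mark_dirty(d, mfn);
}
#endif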

static inline int
shadow_fault(unsigned long va, struct cpu_user_regs *regs)
/* Called from pagefault handler in Xen, and from the HVM trap handlers
 * for pagefaults.  Returns 1 if this fault was an artefact of the
 * shadow code (and the guest should retry) or 0 if it is not (and the
 * fault should be handled elsewhere or passed to the guest). */
{
    struct vcpu *v = current;
    perfc_incrc(shadow_fault);
    return v->arch.shadow.mode->page_fault(v, va, regs);
}

static inline int
shadow_invlpg(struct vcpu *v, unsigned long va)
/* Called when the guest requests an invlpg.  Returns 1 if the invlpg
 * instruction should be issued on the hardware, or 0 if it's safe not
 * to do so. */
{
    return v->arch.shadow.mode->invlpg(v, va);
}

static inline paddr_t
shadow_gva_to_gpa(struct vcpu *v, unsigned long va)
/* Called to translate a guest virtual address to what the *guest*
 * pagetables would map it to. */
{
    if ( unlikely(!shadow_vcpu_mode_translate(v)) )
        return (paddr_t) va;
    return v->arch.shadow.mode->gva_to_gpa(v, va);
}

static inline unsigned long
shadow_gva_to_gfn(struct vcpu *v, unsigned long va)
/* Called to translate a guest virtual address to the guest frame number
 * that the *guest* pagetables would map it to. */
{
    if ( unlikely(!shadow_vcpu_mode_translate(v)) )
        return va >> PAGE_SHIFT;
    return v->arch.shadow.mode->gva_to_gfn(v, va);
}

static inline void
shadow_update_cr3(struct vcpu *v)
/* Updates all the things that are derived from the guest's CR3.
 * Called when the guest changes CR3. */
{
    shadow_lock(v->domain);
    v->arch.shadow.mode->update_cr3(v);
    shadow_unlock(v->domain);
}

/* Should be called after CR3 is updated.
 * Updates vcpu->arch.cr3 and, for HVM guests, vcpu->arch.hvm_vcpu.cpu_cr3.
 *
 * Also updates other state derived from CR3 (vcpu->arch.guest_vtable,
 * shadow_vtable, etc).
 *
 * Uses values found in vcpu->arch.(guest_table and guest_table_user), and
 * for HVM guests, arch.monitor_table and hvm's guest CR3.
 *
 * Update ref counts to shadow tables appropriately.
 */
static inline void update_cr3(struct vcpu *v)
{
    unsigned long cr3_mfn = 0;

    if ( shadow_mode_enabled(v->domain) )
    {
        shadow_update_cr3(v);
        return;
    }

#if CONFIG_PAGING_LEVELS == 4
    if ( !(v->arch.flags & TF_kernel_mode) )
        cr3_mfn = pagetable_get_pfn(v->arch.guest_table_user);
    else
#endif
        cr3_mfn = pagetable_get_pfn(v->arch.guest_table);

    make_cr3(v, cr3_mfn);
}

extern void sh_update_paging_modes(struct vcpu *v);

/* Should be called to initialise paging structures if the paging mode
 * has changed, and when bringing up a VCPU for the first time. */
static inline void shadow_update_paging_modes(struct vcpu *v)
{
    ASSERT(shadow_mode_enabled(v->domain));
    shadow_lock(v->domain);
    sh_update_paging_modes(v);
    shadow_unlock(v->domain);
}

static inline void
shadow_detach_old_tables(struct vcpu *v)
{
    if ( v->arch.shadow.mode )
        v->arch.shadow.mode->detach_old_tables(v);
}

static inline mfn_t
shadow_make_monitor_table(struct vcpu *v)
{
    return v->arch.shadow.mode->make_monitor_table(v);
}

static inline void
shadow_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
{
    v->arch.shadow.mode->destroy_monitor_table(v, mmfn);
}

static inline void *
guest_map_l1e(struct vcpu *v, unsigned long addr, unsigned long *gl1mfn)
{
    if ( likely(!shadow_mode_translate(v->domain)) )
    {
        l2_pgentry_t l2e;
        ASSERT(!shadow_mode_external(v->domain));
        /* Find this l1e and its enclosing l1mfn in the linear map */
        if ( __copy_from_user(&l2e,
                              &__linear_l2_table[l2_linear_offset(addr)],
                              sizeof(l2_pgentry_t)) != 0 )
            return NULL;
        /* Check flags that it will be safe to read the l1e */
        if ( (l2e_get_flags(l2e) & (_PAGE_PRESENT | _PAGE_PSE))
             != _PAGE_PRESENT )
            return NULL;
        *gl1mfn = l2e_get_pfn(l2e);
        return &__linear_l1_table[l1_linear_offset(addr)];
    }

    return v->arch.shadow.mode->guest_map_l1e(v, addr, gl1mfn);
}

static inline void
guest_unmap_l1e(struct vcpu *v, void *p)
{
    if ( unlikely(shadow_mode_translate(v->domain)) )
        unmap_domain_page(p);
}
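
/* Illustrative sketch of the expected map/read/unmap pattern around
 * guest_map_l1e().  The function and variable names are placeholders. */
#if 0
static void example_read_guest_l1e(struct vcpu *v, unsigned long va)
{
    unsigned long gl1mfn;
    l1_pgentry_t *p = guest_map_l1e(v, va, &gl1mfn);
    if ( p != NULL )
    {
        l1_pgentry_t l1e = *p;        /* inspect the guest's l1 entry */
        (void) l1e;
        guest_unmap_l1e(v, p);        /* only unmaps in translate mode */
    }
}
#endif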

static inline void
guest_get_eff_l1e(struct vcpu *v, unsigned long addr, void *eff_l1e)
{
    if ( likely(!shadow_mode_translate(v->domain)) )
    {
        ASSERT(!shadow_mode_external(v->domain));
        if ( __copy_from_user(eff_l1e,
                              &__linear_l1_table[l1_linear_offset(addr)],
                              sizeof(l1_pgentry_t)) != 0 )
            *(l1_pgentry_t *)eff_l1e = l1e_empty();
        return;
    }

    v->arch.shadow.mode->guest_get_eff_l1e(v, addr, eff_l1e);
}

static inline void
guest_get_eff_kern_l1e(struct vcpu *v, unsigned long addr, void *eff_l1e)
{
#if defined(__x86_64__)
    int user_mode = !(v->arch.flags & TF_kernel_mode);
#define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(v)
#else
#define TOGGLE_MODE() ((void)0)
#endif

    TOGGLE_MODE();
    guest_get_eff_l1e(v, addr, eff_l1e);
    TOGGLE_MODE();
}

/* Validate a pagetable change from the guest and update the shadows. */
extern int shadow_validate_guest_entry(struct vcpu *v, mfn_t gmfn,
                                       void *new_guest_entry);
extern int __shadow_validate_guest_entry(struct vcpu *v, mfn_t gmfn,
                                         void *entry, u32 size);

/* Update the shadows in response to a pagetable write from an HVM guest */
extern void shadow_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn,
                                           void *entry, u32 size);

/* Remove all writeable mappings of a guest frame from the shadows.
 * Returns non-zero if we need to flush TLBs.
 * level and fault_addr describe how we found this to be a pagetable;
 * level==0 means we have some other reason for revoking write access. */
extern int shadow_remove_write_access(struct vcpu *v, mfn_t readonly_mfn,
                                      unsigned int level,
                                      unsigned long fault_addr);

/* Remove all mappings of the guest mfn from the shadows.
 * Returns non-zero if we need to flush TLBs. */
extern int shadow_remove_all_mappings(struct vcpu *v, mfn_t target_mfn);

/* This is an HVM page that we think is no longer a pagetable.
 * Unshadow it, and recursively unshadow pages that reference it. */
void
shadow_remove_all_shadows_and_parents(struct vcpu *v, mfn_t gmfn);

/* Remove all shadows of the guest mfn. */
extern void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all);
static inline void shadow_remove_all_shadows(struct vcpu *v, mfn_t gmfn)
{
    int was_locked = shadow_lock_is_acquired(v->domain);
    if ( !was_locked )
        shadow_lock(v->domain);
    sh_remove_shadows(v, gmfn, 0, 1);
    if ( !was_locked )
        shadow_unlock(v->domain);
}

/* Add a page to a domain */
void
shadow_guest_physmap_add_page(struct domain *d, unsigned long gfn,
                              unsigned long mfn);

/* Remove a page from a domain */
void
shadow_guest_physmap_remove_page(struct domain *d, unsigned long gfn,
                                 unsigned long mfn);

/*
 * Allocation of shadow pages
 */

/* Return the minimum acceptable number of shadow pages a domain needs */
unsigned int shadow_min_acceptable_pages(struct domain *d);

/* Set the pool of shadow pages to the required number of MB.
 * Input will be rounded up to at least min_acceptable_shadow_pages().
 * Returns 0 for success, 1 for failure. */
unsigned int shadow_set_allocation(struct domain *d,
                                   unsigned int megabytes,
                                   int *preempted);

/* Return the size of the shadow pool, rounded up to the nearest MB */
static inline unsigned int shadow_get_allocation(struct domain *d)
{
    unsigned int pg = d->arch.shadow.total_pages;
    return ((pg >> (20 - PAGE_SHIFT))
            + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
}
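
/* Worked example of the rounding above, assuming 4kB pages (PAGE_SHIFT == 12,
 * so 256 pages per MB): total_pages == 300 gives (300 >> 8) == 1 plus a
 * nonzero remainder (300 & 255 == 44), i.e. 2MB; exactly 512 pages gives
 * (512 >> 8) == 2 with a zero remainder, i.e. 2MB with no rounding. */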

/**************************************************************************/
/* Guest physmap (p2m) support
 *
 * The phys_to_machine_mapping is the reverse of the machine-to-phys (MPT)
 * mapping, used for full virtualization.  It is only used by
 * shadow_mode_translate()==true guests, so we steal the address space that
 * would have normally been used by the read-only MPT map.
 */

#define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)

/* Read the current domain's P2M table. */
static inline mfn_t sh_gfn_to_mfn_current(unsigned long gfn)
{
    l1_pgentry_t l1e = l1e_empty();
    int ret;

    if ( gfn > current->domain->arch.max_mapped_pfn )
        return _mfn(INVALID_MFN);

    /* Don't read off the end of the p2m table */
    ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(l1_pgentry_t));

    ret = __copy_from_user(&l1e,
                           &phys_to_machine_mapping[gfn],
                           sizeof(l1e));

    if ( (ret == 0) && (l1e_get_flags(l1e) & _PAGE_PRESENT) )
        return _mfn(l1e_get_pfn(l1e));

    return _mfn(INVALID_MFN);
}

/* Walk another domain's P2M table, mapping pages as we go */
extern mfn_t sh_gfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);

/* General conversion function from gfn to mfn */
static inline mfn_t
sh_gfn_to_mfn(struct domain *d, unsigned long gfn)
{
    if ( !shadow_mode_translate(d) )
        return _mfn(gfn);
    if ( likely(current->domain == d) )
        return sh_gfn_to_mfn_current(gfn);
    else
        return sh_gfn_to_mfn_foreign(d, gfn);
}
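
/* Illustrative sketch: translating a gfn and checking the result before
 * using it; _mfn(INVALID_MFN) fails the mfn_valid() test.  The function
 * name is a placeholder for this example. */
#if 0
static int example_gfn_is_backed(struct domain *d, unsigned long gfn)
{
    mfn_t mfn = sh_gfn_to_mfn(d, gfn);
    return mfn_valid(mfn_x(mfn));
}
#endif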

/* Compatibility function for HVM code */
static inline unsigned long get_mfn_from_gpfn(unsigned long pfn)
{
    return mfn_x(sh_gfn_to_mfn_current(pfn));
}

/* General conversion function from mfn to gfn */
static inline unsigned long
sh_mfn_to_gfn(struct domain *d, mfn_t mfn)
{
    if ( shadow_mode_translate(d) )
        return get_gpfn_from_mfn(mfn_x(mfn));
    else
        return mfn_x(mfn);
}

/* Is this guest address an mmio one? (i.e. not defined in p2m map) */
static inline int
mmio_space(paddr_t gpa)
{
    unsigned long gfn = gpa >> PAGE_SHIFT;
    return !mfn_valid(mfn_x(sh_gfn_to_mfn_current(gfn)));
}

static inline l1_pgentry_t
gl1e_to_ml1e(struct domain *d, l1_pgentry_t l1e)
{
    if ( unlikely(shadow_mode_translate(d)) )
        l1e = l1e_from_pfn(gmfn_to_mfn(d, l1e_get_pfn(l1e)),
                           l1e_get_flags(l1e));
    return l1e;
}

#endif /* _XEN_SHADOW_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */