xen/include/asm-x86/shadow.h @ 11648:5f42b4824e45

[XEN] Fix interaction between tlbflush timestamp and shadow flags
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>

author   Tim Deegan <tim.deegan@xensource.com>
date     Thu Sep 28 17:09:11 2006 +0100 (2006-09-28)
parents  69e52712fbc4
children b6ee084892da
/******************************************************************************
 * include/asm-x86/shadow.h
 *
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _XEN_SHADOW_H
#define _XEN_SHADOW_H

#include <public/domctl.h>
#include <xen/sched.h>
#include <xen/perfc.h>
#include <asm/flushtlb.h>

/* How to make sure a page is not referred to in a shadow PT */
/* This will need to be a for_each_vcpu if we go to per-vcpu shadows */
#define shadow_drop_references(_d, _p)                      \
    shadow_remove_all_mappings((_d)->vcpu[0], _mfn(page_to_mfn(_p)))
#define shadow_sync_and_drop_references(_d, _p)             \
    shadow_remove_all_mappings((_d)->vcpu[0], _mfn(page_to_mfn(_p)))

/* How to add and remove entries in the p2m mapping. */
#define guest_physmap_add_page(_d, _p, _m)                  \
    shadow_guest_physmap_add_page((_d), (_p), (_m))
#define guest_physmap_remove_page(_d, _p, _m)               \
    shadow_guest_physmap_remove_page((_d), (_p), (_m))

/* Shadow PT operation mode : shadow-mode variable in arch_domain. */

#define SHM2_shift 10
/* We're in one of the shadow modes */
#define SHM2_enable    (1U << SHM2_shift)
/* Refcounts based on shadow tables instead of guest tables */
#define SHM2_refcounts (XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT << SHM2_shift)
/* Enable log dirty mode */
#define SHM2_log_dirty (XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY << SHM2_shift)
/* Xen does p2m translation, not guest */
#define SHM2_translate (XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE << SHM2_shift)
/* Xen does not steal address space from the domain for its own bookkeeping;
 * requires VT or similar mechanisms */
#define SHM2_external  (XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL << SHM2_shift)

#define shadow_mode_enabled(_d)   ((_d)->arch.shadow.mode)
#define shadow_mode_refcounts(_d) ((_d)->arch.shadow.mode & SHM2_refcounts)
#define shadow_mode_log_dirty(_d) ((_d)->arch.shadow.mode & SHM2_log_dirty)
#define shadow_mode_translate(_d) ((_d)->arch.shadow.mode & SHM2_translate)
#define shadow_mode_external(_d)  ((_d)->arch.shadow.mode & SHM2_external)

/* Xen traps & emulates all reads of all page table pages:
 * not yet supported
 */
#define shadow_mode_trap_reads(_d) ({ (void)(_d); 0; })

// flags used in the return value of the shadow_set_lXe() functions...
#define SHADOW_SET_CHANGED       0x1
#define SHADOW_SET_FLUSH         0x2
#define SHADOW_SET_ERROR         0x4
#define SHADOW_SET_L3PAE_RECOPY  0x8

// How do we tell that we have a 32-bit PV guest in a 64-bit Xen?
#ifdef __x86_64__
#define pv_32bit_guest(_v) 0 // not yet supported
#else
#define pv_32bit_guest(_v) (!hvm_guest(_v))
#endif

/* The shadow lock.
 *
 * This lock is per-domain.  It is intended to allow us to make atomic
 * updates to the software TLB that the shadow tables provide.
 *
 * Specifically, it protects:
 *   - all changes to shadow page table pages
 *   - the shadow hash table
 *   - the shadow page allocator
 *   - all changes to guest page table pages; if/when the notion of
 *     out-of-sync pages is added to this code, then the shadow lock is
 *     protecting all guest page table pages which are not currently listed
 *     as both guest-writable and out-of-sync...
 *     XXX -- need to think about this relative to writable page tables.
 *   - all changes to the page_info->tlbflush_timestamp
 *   - the page_info->count fields on shadow pages
 *   - the shadow dirty bit array and count
 *   - XXX
 */
#ifndef CONFIG_SMP
#error shadow.h currently requires CONFIG_SMP
#endif

#define shadow_lock_init(_d)                            \
    do {                                                \
        spin_lock_init(&(_d)->arch.shadow.lock);        \
        (_d)->arch.shadow.locker = -1;                  \
        (_d)->arch.shadow.locker_function = "nobody";   \
    } while (0)

#define shadow_lock_is_acquired(_d)                     \
    (current->processor == (_d)->arch.shadow.locker)

#define shadow_lock(_d)                                                 \
    do {                                                                \
        if ( unlikely((_d)->arch.shadow.locker == current->processor) ) \
        {                                                               \
            printk("Error: shadow lock held by %s\n",                   \
                   (_d)->arch.shadow.locker_function);                  \
            BUG();                                                      \
        }                                                               \
        spin_lock(&(_d)->arch.shadow.lock);                             \
        ASSERT((_d)->arch.shadow.locker == -1);                         \
        (_d)->arch.shadow.locker = current->processor;                  \
        (_d)->arch.shadow.locker_function = __func__;                   \
    } while (0)

#define shadow_unlock(_d)                                       \
    do {                                                        \
        ASSERT((_d)->arch.shadow.locker == current->processor); \
        (_d)->arch.shadow.locker = -1;                          \
        (_d)->arch.shadow.locker_function = "nobody";           \
        spin_unlock(&(_d)->arch.shadow.lock);                   \
    } while (0)
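
/* Illustrative sketch (editor's addition, not part of the original header):
 * a typical caller takes the per-domain shadow lock around any update to the
 * shadow tables, and can assert ownership with shadow_lock_is_acquired().
 * Hypothetical example:
 *
 *     static void example_touch_shadows(struct domain *d)
 *     {
 *         shadow_lock(d);                      // records locker CPU + function
 *         ASSERT(shadow_lock_is_acquired(d));  // true only on the locking CPU
 *         // ... modify shadow pagetables, hash table, dirty bitmap ...
 *         shadow_unlock(d);                    // resets locker to "nobody"
 *     }
 *
 * Note that shadow_lock() BUG()s on recursive acquisition by the same CPU,
 * so callers such as shadow_remove_all_shadows() below check
 * shadow_lock_is_acquired() first rather than re-locking. */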

/*
 * Levels of self-test and paranoia
 * XXX should go in config files somewhere?
 */
#define SHADOW_AUDIT_HASH           0x01  /* Check current hash bucket */
#define SHADOW_AUDIT_HASH_FULL      0x02  /* Check every hash bucket */
#define SHADOW_AUDIT_ENTRIES        0x04  /* Check this walk's shadows */
#define SHADOW_AUDIT_ENTRIES_FULL   0x08  /* Check every shadow */
#define SHADOW_AUDIT_ENTRIES_MFNS   0x10  /* Check gfn-mfn map in shadows */
#define SHADOW_AUDIT_P2M            0x20  /* Check the p2m table */

#ifdef NDEBUG
#define SHADOW_AUDIT           0
#define SHADOW_AUDIT_ENABLE    0
#else
#define SHADOW_AUDIT           0x15  /* Basic audit of all except p2m. */
#define SHADOW_AUDIT_ENABLE    shadow_audit_enable
extern int shadow_audit_enable;
#endif

/*
 * Levels of optimization
 * XXX should go in config files somewhere?
 */
#define SHOPT_WRITABLE_HEURISTIC  0x01  /* Guess at RW PTEs via linear maps */
#define SHOPT_EARLY_UNSHADOW      0x02  /* Unshadow l1s on fork or exit */

#define SHADOW_OPTIMIZATIONS      0x03


/* With shadow pagetables, the different kinds of address start
 * to get confusing.
 *
 * Virtual addresses are what they usually are: the addresses that are used
 * to access memory while the guest is running.  The MMU translates from
 * virtual addresses to machine addresses.
 *
 * (Pseudo-)physical addresses are the abstraction of physical memory the
 * guest uses for allocation and so forth.  For the purposes of this code,
 * we can largely ignore them.
 *
 * Guest frame numbers (gfns) are the entries that the guest puts in its
 * pagetables.  For normal paravirtual guests, they are actual frame numbers,
 * with the translation done by the guest.
 *
 * Machine frame numbers (mfns) are the entries that the hypervisor puts
 * in the shadow page tables.
 *
 * Elsewhere in the xen code base, the name "gmfn" is generally used to refer
 * to a "machine frame number, from the guest's perspective", or in other
 * words, pseudo-physical frame numbers.  However, in the shadow code, the
 * term "gmfn" means "the mfn of a guest page"; this combines naturally with
 * other terms such as "smfn" (the mfn of a shadow page), gl2mfn (the mfn of a
 * guest L2 page), etc...
 */

/* With this defined, we do some ugly things to force the compiler to
 * give us type safety between mfns and gfns and other integers.
 * TYPE_SAFE(int foo) defines a foo_t, and _foo() and foo_x() functions
 * that translate between int and foo_t.
 *
 * It does have some performance cost because the types now have
 * a different storage attribute, so we may not want it on all the time. */
#ifndef NDEBUG
#define TYPE_SAFETY 1
#endif

#ifdef TYPE_SAFETY
#define TYPE_SAFE(_type,_name)                                           \
typedef struct { _type _name; } _name##_t;                               \
static inline _name##_t _##_name(_type n) { return (_name##_t) { n }; }  \
static inline _type _name##_x(_name##_t n) { return n._name; }
#else
#define TYPE_SAFE(_type,_name)                                           \
typedef _type _name##_t;                                                 \
static inline _name##_t _##_name(_type n) { return n; }                  \
static inline _type _name##_x(_name##_t n) { return n; }
#endif

TYPE_SAFE(unsigned long,mfn)
#define SH_PRI_mfn "05lx"
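
/* Illustrative note (editor's addition): with TYPE_SAFETY defined, mfn_t is a
 * distinct struct type, so accidentally mixing frame numbers with plain
 * integers fails to compile.  A hypothetical sketch:
 *
 *     mfn_t m = _mfn(42);              // wrap a raw frame number
 *     unsigned long raw = mfn_x(m);    // unwrap it again
 *     // unsigned long bad = m + 1;    // compile error: m is a struct
 *
 * In NDEBUG builds TYPE_SAFETY is not defined, mfn_t degenerates to a plain
 * unsigned long, and the checks (and the storage-attribute cost mentioned
 * above) disappear. */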

static inline int
valid_mfn(mfn_t m)
{
    return VALID_MFN(mfn_x(m));
}

static inline mfn_t
pagetable_get_mfn(pagetable_t pt)
{
    return _mfn(pagetable_get_pfn(pt));
}

static inline pagetable_t
pagetable_from_mfn(mfn_t mfn)
{
    return pagetable_from_pfn(mfn_x(mfn));
}

static inline int
shadow_vcpu_mode_translate(struct vcpu *v)
{
    // Returns true if this VCPU needs to be using the P2M table to translate
    // between GFNs and MFNs.
    //
    // This is true of translated HVM domains on a vcpu which has paging
    // enabled.  (HVM vcpus with paging disabled are using the p2m table as
    // their paging table, so no translation occurs in this case.)
    //
    return v->arch.shadow.hvm_paging_enabled;
}


/**************************************************************************/
/* Mode-specific entry points into the shadow code */

struct x86_emulate_ctxt;
struct shadow_paging_mode {
    int           (*page_fault            )(struct vcpu *v, unsigned long va,
                                            struct cpu_user_regs *regs);
    int           (*invlpg                )(struct vcpu *v, unsigned long va);
    unsigned long (*gva_to_gpa            )(struct vcpu *v, unsigned long va);
    unsigned long (*gva_to_gfn            )(struct vcpu *v, unsigned long va);
    void          (*update_cr3            )(struct vcpu *v);
    int           (*map_and_validate_gl1e )(struct vcpu *v, mfn_t gmfn,
                                            void *new_guest_entry, u32 size);
    int           (*map_and_validate_gl2e )(struct vcpu *v, mfn_t gmfn,
                                            void *new_guest_entry, u32 size);
    int           (*map_and_validate_gl2he)(struct vcpu *v, mfn_t gmfn,
                                            void *new_guest_entry, u32 size);
    int           (*map_and_validate_gl3e )(struct vcpu *v, mfn_t gmfn,
                                            void *new_guest_entry, u32 size);
    int           (*map_and_validate_gl4e )(struct vcpu *v, mfn_t gmfn,
                                            void *new_guest_entry, u32 size);
    void          (*detach_old_tables     )(struct vcpu *v);
    int           (*x86_emulate_write     )(struct vcpu *v, unsigned long va,
                                            void *src, u32 bytes,
                                            struct x86_emulate_ctxt *ctxt);
    int           (*x86_emulate_cmpxchg   )(struct vcpu *v, unsigned long va,
                                            unsigned long old,
                                            unsigned long new,
                                            unsigned int bytes,
                                            struct x86_emulate_ctxt *ctxt);
    int           (*x86_emulate_cmpxchg8b )(struct vcpu *v, unsigned long va,
                                            unsigned long old_lo,
                                            unsigned long old_hi,
                                            unsigned long new_lo,
                                            unsigned long new_hi,
                                            struct x86_emulate_ctxt *ctxt);
    mfn_t         (*make_monitor_table    )(struct vcpu *v);
    void          (*destroy_monitor_table )(struct vcpu *v, mfn_t mmfn);
#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
    int           (*guess_wrmap           )(struct vcpu *v,
                                            unsigned long vaddr, mfn_t gmfn);
#endif
    /* For outsiders to tell what mode we're in */
    unsigned int shadow_levels;
    unsigned int guest_levels;
};

static inline int shadow_guest_paging_levels(struct vcpu *v)
{
    ASSERT(v->arch.shadow.mode != NULL);
    return v->arch.shadow.mode->guest_levels;
}

/**************************************************************************/
/* Entry points into the shadow code */

/* Turning on shadow test mode */
int shadow_test_enable(struct domain *d);

/* Handler for shadow control ops: enabling and disabling shadow modes,
 * and log-dirty bitmap ops all happen through here. */
int shadow_domctl(struct domain *d,
                  xen_domctl_shadow_op_t *sc,
                  XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);

/* Call when destroying a domain */
void shadow_teardown(struct domain *d);

/* Call once all of the references to the domain have gone away */
void shadow_final_teardown(struct domain *d);


/* Mark a page as dirty in the bitmap */
void sh_do_mark_dirty(struct domain *d, mfn_t gmfn);
static inline void mark_dirty(struct domain *d, unsigned long gmfn)
{
    if ( likely(!shadow_mode_log_dirty(d)) )
        return;

    shadow_lock(d);
    sh_do_mark_dirty(d, _mfn(gmfn));
    shadow_unlock(d);
}

/* Internal version, for when the shadow lock is already held */
static inline void sh_mark_dirty(struct domain *d, mfn_t gmfn)
{
    ASSERT(shadow_lock_is_acquired(d));
    if ( unlikely(shadow_mode_log_dirty(d)) )
        sh_do_mark_dirty(d, gmfn);
}

static inline int
shadow_fault(unsigned long va, struct cpu_user_regs *regs)
/* Called from pagefault handler in Xen, and from the HVM trap handlers
 * for pagefaults.  Returns 1 if this fault was an artefact of the
 * shadow code (and the guest should retry) or 0 if it is not (and the
 * fault should be handled elsewhere or passed to the guest). */
{
    struct vcpu *v = current;
    perfc_incrc(shadow_fault);
    return v->arch.shadow.mode->page_fault(v, va, regs);
}

static inline int
shadow_invlpg(struct vcpu *v, unsigned long va)
/* Called when the guest requests an invlpg.  Returns 1 if the invlpg
 * instruction should be issued on the hardware, or 0 if it's safe not
 * to do so. */
{
    return v->arch.shadow.mode->invlpg(v, va);
}

static inline unsigned long
shadow_gva_to_gpa(struct vcpu *v, unsigned long va)
/* Called to translate a guest virtual address to what the *guest*
 * pagetables would map it to. */
{
    return v->arch.shadow.mode->gva_to_gpa(v, va);
}

static inline unsigned long
shadow_gva_to_gfn(struct vcpu *v, unsigned long va)
/* Called to translate a guest virtual address to what the *guest*
 * pagetables would map it to. */
{
    return v->arch.shadow.mode->gva_to_gfn(v, va);
}

static inline void
shadow_update_cr3(struct vcpu *v)
/* Updates all the things that are derived from the guest's CR3.
 * Called when the guest changes CR3. */
{
    shadow_lock(v->domain);
    v->arch.shadow.mode->update_cr3(v);
    shadow_unlock(v->domain);
}


/* Should be called after CR3 is updated.
 * Updates vcpu->arch.cr3 and, for HVM guests, vcpu->arch.hvm_vcpu.cpu_cr3.
 *
 * Also updates other state derived from CR3 (vcpu->arch.guest_vtable,
 * shadow_vtable, etc).
 *
 * Uses values found in vcpu->arch.(guest_table and guest_table_user), and
 * for HVM guests, arch.monitor_table and hvm's guest CR3.
 *
 * Update ref counts to shadow tables appropriately.
 * For PAE, relocate L3 entries, if necessary, into low memory.
 */
static inline void update_cr3(struct vcpu *v)
{
    unsigned long cr3_mfn = 0;

    if ( shadow_mode_enabled(v->domain) )
    {
        shadow_update_cr3(v);
        return;
    }

#if CONFIG_PAGING_LEVELS == 4
    if ( !(v->arch.flags & TF_kernel_mode) )
        cr3_mfn = pagetable_get_pfn(v->arch.guest_table_user);
    else
#endif
        cr3_mfn = pagetable_get_pfn(v->arch.guest_table);

    make_cr3(v, cr3_mfn);
}

extern void sh_update_paging_modes(struct vcpu *v);

/* Should be called to initialise paging structures if the paging mode
 * has changed, and when bringing up a VCPU for the first time. */
static inline void shadow_update_paging_modes(struct vcpu *v)
{
    ASSERT(shadow_mode_enabled(v->domain));
    shadow_lock(v->domain);
    sh_update_paging_modes(v);
    shadow_unlock(v->domain);
}

static inline void
shadow_detach_old_tables(struct vcpu *v)
{
    if ( v->arch.shadow.mode )
        v->arch.shadow.mode->detach_old_tables(v);
}

static inline mfn_t
shadow_make_monitor_table(struct vcpu *v)
{
    return v->arch.shadow.mode->make_monitor_table(v);
}

static inline void
shadow_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
{
    v->arch.shadow.mode->destroy_monitor_table(v, mmfn);
}

/* Validate a pagetable change from the guest and update the shadows. */
extern int shadow_validate_guest_entry(struct vcpu *v, mfn_t gmfn,
                                       void *new_guest_entry);

/* Update the shadows in response to a pagetable write from a HVM guest */
extern void shadow_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn,
                                           void *entry, u32 size);

/* Remove all writeable mappings of a guest frame from the shadows.
 * Returns non-zero if we need to flush TLBs.
 * level and fault_addr describe how we found this to be a pagetable;
 * level==0 means we have some other reason for revoking write access. */
extern int shadow_remove_write_access(struct vcpu *v, mfn_t readonly_mfn,
                                      unsigned int level,
                                      unsigned long fault_addr);

/* Remove all mappings of the guest mfn from the shadows.
 * Returns non-zero if we need to flush TLBs. */
extern int shadow_remove_all_mappings(struct vcpu *v, mfn_t target_mfn);

/* This is an HVM page that we think is no longer a pagetable.
 * Unshadow it, and recursively unshadow pages that reference it. */
void
shadow_remove_all_shadows_and_parents(struct vcpu *v, mfn_t gmfn);

/* Remove all shadows of the guest mfn. */
extern void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int all);
static inline void shadow_remove_all_shadows(struct vcpu *v, mfn_t gmfn)
{
    int was_locked = shadow_lock_is_acquired(v->domain);
    if ( !was_locked )
        shadow_lock(v->domain);
    sh_remove_shadows(v, gmfn, 1);
    if ( !was_locked )
        shadow_unlock(v->domain);
}

/* Add a page to a domain */
void
shadow_guest_physmap_add_page(struct domain *d, unsigned long gfn,
                              unsigned long mfn);

/* Remove a page from a domain */
void
shadow_guest_physmap_remove_page(struct domain *d, unsigned long gfn,
                                 unsigned long mfn);

/*
 * Definitions for the shadow_flags field in page_info.
 * These flags are stored on *guest* pages...
 * Bits 1-13 are encodings for the shadow types.
 */
#define PGC_SH_type_to_index(_type) ((_type) >> PGC_SH_type_shift)
#define SHF_page_type_mask \
    (((1u << (PGC_SH_type_to_index(PGC_SH_max_shadow) + 1u)) - 1u) - \
     ((1u << PGC_SH_type_to_index(PGC_SH_min_shadow)) - 1u))
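
/* Illustrative note (editor's addition): SHF_page_type_mask is the set of
 * bits covering every shadow-type index.  Given the layout described above,
 * where the shadow types occupy index bits 1..13, it expands to
 * ((1u << 14) - 1u) - ((1u << 1) - 1u) = 0x3ffe, i.e. bits 1-13 set and bit 0
 * (plus the high bits, including SHF_unhooked_mappings below) clear. */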

#define SHF_L1_32   (1u << PGC_SH_type_to_index(PGC_SH_l1_32_shadow))
#define SHF_FL1_32  (1u << PGC_SH_type_to_index(PGC_SH_fl1_32_shadow))
#define SHF_L2_32   (1u << PGC_SH_type_to_index(PGC_SH_l2_32_shadow))
#define SHF_L1_PAE  (1u << PGC_SH_type_to_index(PGC_SH_l1_pae_shadow))
#define SHF_FL1_PAE (1u << PGC_SH_type_to_index(PGC_SH_fl1_pae_shadow))
#define SHF_L2_PAE  (1u << PGC_SH_type_to_index(PGC_SH_l2_pae_shadow))
#define SHF_L2H_PAE (1u << PGC_SH_type_to_index(PGC_SH_l2h_pae_shadow))
#define SHF_L3_PAE  (1u << PGC_SH_type_to_index(PGC_SH_l3_pae_shadow))
#define SHF_L1_64   (1u << PGC_SH_type_to_index(PGC_SH_l1_64_shadow))
#define SHF_FL1_64  (1u << PGC_SH_type_to_index(PGC_SH_fl1_64_shadow))
#define SHF_L2_64   (1u << PGC_SH_type_to_index(PGC_SH_l2_64_shadow))
#define SHF_L3_64   (1u << PGC_SH_type_to_index(PGC_SH_l3_64_shadow))
#define SHF_L4_64   (1u << PGC_SH_type_to_index(PGC_SH_l4_64_shadow))

/* Used for hysteresis when automatically unhooking mappings on fork/exit */
#define SHF_unhooked_mappings (1u<<31)

/*
 * Allocation of shadow pages
 */

/* Return the minimum acceptable number of shadow pages a domain needs */
unsigned int shadow_min_acceptable_pages(struct domain *d);

/* Set the pool of shadow pages to the required number of MB.
 * Input will be rounded up to at least min_acceptable_shadow_pages().
 * Returns 0 for success, 1 for failure. */
unsigned int shadow_set_allocation(struct domain *d,
                                   unsigned int megabytes,
                                   int *preempted);

/* Return the size of the shadow pool, rounded up to the nearest MB */
static inline unsigned int shadow_get_allocation(struct domain *d)
{
    unsigned int pg = d->arch.shadow.total_pages;
    return ((pg >> (20 - PAGE_SHIFT))
            + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
}
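
/* Illustrative note (editor's addition): with 4kB pages (PAGE_SHIFT == 12)
 * there are 1 << (20 - 12) == 256 pages per MB, so e.g. a pool of 300 pages
 * yields (300 >> 8) == 1 plus 1 for the non-zero remainder, i.e. 2MB. */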

/*
 * Linked list for chaining entries in the shadow hash table.
 */
struct shadow_hash_entry {
    struct shadow_hash_entry *next;
    mfn_t smfn;                 /* MFN of the shadow */
#ifdef __x86_64__               /* Shorten 'n' so we don't waste a whole word on storing 't' */
    unsigned long n:56;         /* MFN of guest PT or GFN of guest superpage */
#else
    unsigned long n;            /* MFN of guest PT or GFN of guest superpage */
#endif
    unsigned char t;            /* shadow type bits, or 0 for empty */
};

#define SHADOW_HASH_BUCKETS 251
/* Other possibly useful primes are 509, 1021, 2039, 4093, 8191, 16381 */


#if SHADOW_OPTIMIZATIONS & SHOPT_CACHE_WALKS
/* Optimization: cache the results of guest walks.  This helps with MMIO
 * and emulated writes, which tend to issue very similar walk requests
 * repeatedly.  We keep the results of the last few walks, and blow
 * away the cache on guest cr3 write, mode change, or page fault. */

#define SH_WALK_CACHE_ENTRIES 4

/* Rather than cache a guest walk, which would include mapped pointers
 * to pages, we cache what a TLB would remember about the walk: the
 * permissions and the l1 gfn */
struct shadow_walk_cache {
    unsigned long va;        /* The virtual address (or 0 == unused) */
    unsigned long gfn;       /* The gfn from the effective l1e */
    u32 permissions;         /* The aggregated permission bits */
};
#endif


/**************************************************************************/
/* Guest physmap (p2m) support */

/* Walk another domain's P2M table, mapping pages as we go */
extern mfn_t
sh_gfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);


/* General conversion function from gfn to mfn */
static inline mfn_t
sh_gfn_to_mfn(struct domain *d, unsigned long gfn)
{
    if ( !shadow_mode_translate(d) )
        return _mfn(gfn);
    else if ( likely(current->domain == d) )
        return _mfn(get_mfn_from_gpfn(gfn));
    else
        return sh_gfn_to_mfn_foreign(d, gfn);
}
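
/* Illustrative note (editor's addition): in a non-translated (classic PV)
 * domain the guest already uses machine frame numbers, so the conversion is
 * the identity, e.g. sh_gfn_to_mfn(d, 0x1234) == _mfn(0x1234).  In a
 * translated domain the same call goes through the p2m, either directly for
 * the current domain or via sh_gfn_to_mfn_foreign() for another domain's
 * table. */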

// vcpu-specific version of gfn_to_mfn().  This is where we hide the dirty
// little secret that, for hvm guests with paging disabled, nearly all of the
// shadow code actually thinks that the guest is running on *untranslated*
// page tables (which is actually domain->phys_table).
//
static inline mfn_t
sh_vcpu_gfn_to_mfn(struct vcpu *v, unsigned long gfn)
{
    if ( !shadow_vcpu_mode_translate(v) )
        return _mfn(gfn);
    if ( likely(current->domain == v->domain) )
        return _mfn(get_mfn_from_gpfn(gfn));
    return sh_gfn_to_mfn_foreign(v->domain, gfn);
}

static inline unsigned long
sh_mfn_to_gfn(struct domain *d, mfn_t mfn)
{
    if ( shadow_mode_translate(d) )
        return get_gpfn_from_mfn(mfn_x(mfn));
    else
        return mfn_x(mfn);
}



#endif /* _XEN_SHADOW_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */