direct-io.hg: xen/include/asm-x86/shadow2.h @ 11217:6a8204e4619d

[XEN] Remove bogus assertion.
Signed-off-by: Keir Fraser <keir@xensource.com>

Author: kfraser@localhost.localdomain
Date:   Mon Aug 21 13:36:05 2006 +0100 (2006-08-21)

/******************************************************************************
 * include/asm-x86/shadow2.h
 *
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _XEN_SHADOW2_H
#define _XEN_SHADOW2_H

#include <public/dom0_ops.h>
#include <xen/sched.h>
#include <xen/perfc.h>
#include <asm/flushtlb.h>

/* Shadow PT operation mode : shadow-mode variable in arch_domain. */

#define SHM2_shift 10
/* We're in one of the shadow modes */
#define SHM2_enable    (DOM0_SHADOW2_CONTROL_FLAG_ENABLE << SHM2_shift)
/* Refcounts based on shadow tables instead of guest tables */
#define SHM2_refcounts (DOM0_SHADOW2_CONTROL_FLAG_REFCOUNT << SHM2_shift)
/* Enable log dirty mode */
#define SHM2_log_dirty (DOM0_SHADOW2_CONTROL_FLAG_LOG_DIRTY << SHM2_shift)
/* Xen does p2m translation, not guest */
#define SHM2_translate (DOM0_SHADOW2_CONTROL_FLAG_TRANSLATE << SHM2_shift)
/* Xen does not steal address space from the domain for its own bookkeeping;
 * requires VT or similar mechanisms */
#define SHM2_external  (DOM0_SHADOW2_CONTROL_FLAG_EXTERNAL << SHM2_shift)

#define shadow2_mode_enabled(_d)   ((_d)->arch.shadow2.mode)
#define shadow2_mode_refcounts(_d) ((_d)->arch.shadow2.mode & SHM2_refcounts)
#define shadow2_mode_log_dirty(_d) ((_d)->arch.shadow2.mode & SHM2_log_dirty)
#define shadow2_mode_translate(_d) ((_d)->arch.shadow2.mode & SHM2_translate)
#define shadow2_mode_external(_d)  ((_d)->arch.shadow2.mode & SHM2_external)
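
/* Illustrative sketch (editorial, not part of the original header): the mode
 * word is a bitmask, so the query macros above can be combined freely.  A
 * fully-virtualised (HVM) guest would typically run with something like
 *
 *   unsigned int hvm_mode = SHM2_enable | SHM2_refcounts |
 *                           SHM2_translate | SHM2_external;
 *
 * while a PV domain being live-migrated only needs SHM2_enable |
 * SHM2_log_dirty.  The exact combinations are chosen by the shadow2 enable
 * paths elsewhere, not by this header. */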

/* Xen traps & emulates all reads of all page table pages:
 * not yet supported
 */
#define shadow2_mode_trap_reads(_d) ({ (void)(_d); 0; })

// flags used in the return value of the shadow_set_lXe() functions...
#define SHADOW2_SET_CHANGED            0x1
#define SHADOW2_SET_FLUSH              0x2
#define SHADOW2_SET_ERROR              0x4
#define SHADOW2_SET_L3PAE_RECOPY       0x8
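
/* Illustrative sketch (editorial, not from the original header): callers of
 * the shadow_set_lXe() family are expected to test these bits in the return
 * value, along the lines of
 *
 *   int rc = shadow_set_l1e(v, sl1e_p, new_sl1e, sl1mfn);  // hypothetical call site
 *   if ( rc & SHADOW2_SET_ERROR )
 *       // the shadow entry could not be installed
 *   if ( rc & SHADOW2_SET_FLUSH )
 *       // a TLB flush is needed before the change is safe to rely on
 *
 * The function name and arguments above are assumptions for illustration;
 * the real setters live in the per-mode shadow2 code. */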

// How do we tell that we have a 32-bit PV guest in a 64-bit Xen?
#ifdef __x86_64__
#define pv_32bit_guest(_v) 0 // not yet supported
#else
#define pv_32bit_guest(_v) !hvm_guest(_v)
#endif

/* The shadow2 lock.
 *
 * This lock is per-domain.  It is intended to allow us to make atomic
 * updates to the software TLB that the shadow tables provide.
 *
 * Specifically, it protects:
 *   - all changes to shadow page table pages
 *   - the shadow hash table
 *   - the shadow page allocator
 *   - all changes to guest page table pages; if/when the notion of
 *     out-of-sync pages is added to this code, then the shadow lock is
 *     protecting all guest page table pages which are not currently listed
 *     as both guest-writable and out-of-sync...
 *     XXX -- need to think about this relative to writable page tables.
 *   - all changes to the page_info->tlbflush_timestamp
 *   - the page_info->count fields on shadow pages
 *   - the shadow dirty bit array and count
 *   - XXX
 */
#ifndef CONFIG_SMP
#error shadow2.h currently requires CONFIG_SMP
#endif

#define shadow2_lock_init(_d)                            \
    do {                                                 \
        spin_lock_init(&(_d)->arch.shadow2.lock);        \
        (_d)->arch.shadow2.locker = -1;                  \
        (_d)->arch.shadow2.locker_function = "nobody";   \
    } while (0)

#define shadow2_lock_is_acquired(_d)                     \
    (current->processor == (_d)->arch.shadow2.locker)

#define shadow2_lock(_d)                                                  \
    do {                                                                  \
        if ( unlikely((_d)->arch.shadow2.locker == current->processor) ) \
        {                                                                 \
            printk("Error: shadow2 lock held by %s\n",                    \
                   (_d)->arch.shadow2.locker_function);                   \
            BUG();                                                        \
        }                                                                 \
        spin_lock(&(_d)->arch.shadow2.lock);                              \
        ASSERT((_d)->arch.shadow2.locker == -1);                          \
        (_d)->arch.shadow2.locker = current->processor;                   \
        (_d)->arch.shadow2.locker_function = __func__;                    \
    } while (0)

#define shadow2_unlock(_d)                                                \
    do {                                                                  \
        ASSERT((_d)->arch.shadow2.locker == current->processor);          \
        (_d)->arch.shadow2.locker = -1;                                   \
        (_d)->arch.shadow2.locker_function = "nobody";                    \
        spin_unlock(&(_d)->arch.shadow2.lock);                            \
    } while (0)
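
/* Illustrative usage sketch (editorial, not part of the original header):
 * any code that touches the structures listed above is expected to bracket
 * the work with the lock macros, e.g.
 *
 *   shadow2_lock(d);
 *   ASSERT(shadow2_lock_is_acquired(d));
 *   // ...mutate shadow page tables, hash table, dirty bitmap...
 *   shadow2_unlock(d);
 *
 * The locker/locker_function fields exist so that a recursive acquisition
 * prints the name of the function already holding the lock before BUG(). */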

/*
 * Levels of self-test and paranoia
 * XXX should go in config files somewhere?
 */
#define SHADOW2_AUDIT_HASH           0x01  /* Check current hash bucket */
#define SHADOW2_AUDIT_HASH_FULL      0x02  /* Check every hash bucket */
#define SHADOW2_AUDIT_ENTRIES        0x04  /* Check this walk's shadows */
#define SHADOW2_AUDIT_ENTRIES_FULL   0x08  /* Check every shadow */
#define SHADOW2_AUDIT_ENTRIES_MFNS   0x10  /* Check gfn-mfn map in shadows */
#define SHADOW2_AUDIT_P2M            0x20  /* Check the p2m table */

#ifdef NDEBUG
#define SHADOW2_AUDIT                0
#define SHADOW2_AUDIT_ENABLE         0
#else
#define SHADOW2_AUDIT                0x15  /* Basic audit of all except p2m. */
#define SHADOW2_AUDIT_ENABLE         shadow2_audit_enable
extern int shadow2_audit_enable;
#endif
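
/* For reference (editorial note, not in the original source): the default
 * debug value 0x15 decodes as
 *
 *   SHADOW2_AUDIT_HASH | SHADOW2_AUDIT_ENTRIES | SHADOW2_AUDIT_ENTRIES_MFNS
 *     = 0x01 | 0x04 | 0x10 = 0x15
 *
 * i.e. the cheap per-bucket/per-walk checks are on, while the exhaustive
 * _FULL variants and the p2m audit stay off unless enabled explicitly. */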

/*
 * Levels of optimization
 * XXX should go in config files somewhere?
 */
#define SH2OPT_WRITABLE_HEURISTIC  0x01  /* Guess at RW PTEs via linear maps */
#define SH2OPT_EARLY_UNSHADOW      0x02  /* Unshadow l1s on fork or exit */

#define SHADOW2_OPTIMIZATIONS      0x03

/* With shadow pagetables, the different kinds of address start
 * to get confusing.
 *
 * Virtual addresses are what they usually are: the addresses that are used
 * to access memory while the guest is running.  The MMU translates from
 * virtual addresses to machine addresses.
 *
 * (Pseudo-)physical addresses are the abstraction of physical memory the
 * guest uses for allocation and so forth.  For the purposes of this code,
 * we can largely ignore them.
 *
 * Guest frame numbers (gfns) are the entries that the guest puts in its
 * pagetables.  For normal paravirtual guests, they are actual frame numbers,
 * with the translation done by the guest.
 *
 * Machine frame numbers (mfns) are the entries that the hypervisor puts
 * in the shadow page tables.
 *
 * Elsewhere in the xen code base, the name "gmfn" is generally used to refer
 * to a "machine frame number, from the guest's perspective", or in other
 * words, pseudo-physical frame numbers.  However, in the shadow code, the
 * term "gmfn" means "the mfn of a guest page"; this combines naturally with
 * other terms such as "smfn" (the mfn of a shadow page), gl2mfn (the mfn of a
 * guest L2 page), etc...
 */

/* With this defined, we do some ugly things to force the compiler to
 * give us type safety between mfns and gfns and other integers.
 * TYPE_SAFE(int foo) defines a foo_t, and _foo() and foo_x() functions
 * that translate between int and foo_t.
 *
 * It does have some performance cost because the types now have
 * a different storage attribute, so we may not want it on all the time. */
#ifndef NDEBUG
#define TYPE_SAFETY 1
#endif

#ifdef TYPE_SAFETY
#define TYPE_SAFE(_type,_name)                                           \
typedef struct { _type _name; } _name##_t;                               \
static inline _name##_t _##_name(_type n) { return (_name##_t) { n }; }  \
static inline _type _name##_x(_name##_t n) { return n._name; }
#else
#define TYPE_SAFE(_type,_name)                                           \
typedef _type _name##_t;                                                 \
static inline _name##_t _##_name(_type n) { return n; }                  \
static inline _type _name##_x(_name##_t n) { return n; }
#endif

TYPE_SAFE(unsigned long,mfn)
#define SH2_PRI_mfn "05lx"
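
/* Illustrative sketch (editorial, not in the original header): with
 * TYPE_SAFETY on, mfn_t is a distinct struct type, so the wrappers must be
 * used explicitly and mixing up gfns, mfns and plain integers becomes a
 * compile-time error:
 *
 *   mfn_t m = _mfn(42);                            // wrap a raw frame number
 *   unsigned long raw = mfn_x(m);                  // unwrap it again
 *   printk("mfn %" SH2_PRI_mfn "\n", mfn_x(m));    // prints "mfn 00042"
 *   // unsigned long bad = m;    <-- would not compile when TYPE_SAFETY is set
 */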

static inline int
valid_mfn(mfn_t m)
{
    return VALID_MFN(mfn_x(m));
}

static inline mfn_t
pagetable_get_mfn(pagetable_t pt)
{
    return _mfn(pagetable_get_pfn(pt));
}

static inline pagetable_t
pagetable_from_mfn(mfn_t mfn)
{
    return pagetable_from_pfn(mfn_x(mfn));
}

static inline int
shadow2_vcpu_mode_translate(struct vcpu *v)
{
    // Returns true if this VCPU needs to be using the P2M table to translate
    // between GFNs and MFNs.
    //
    // This is true of translated HVM domains on a vcpu which has paging
    // enabled.  (HVM vcpus with paging disabled are using the p2m table as
    // their paging table, so no translation occurs in this case.)
    //
    return v->arch.shadow2.hvm_paging_enabled;
}


/**************************************************************************/
/* Mode-specific entry points into the shadow code */

struct x86_emulate_ctxt;
struct shadow2_paging_mode {
    int           (*page_fault            )(struct vcpu *v, unsigned long va,
                                            struct cpu_user_regs *regs);
    int           (*invlpg                )(struct vcpu *v, unsigned long va);
    unsigned long (*gva_to_gpa            )(struct vcpu *v, unsigned long va);
    unsigned long (*gva_to_gfn            )(struct vcpu *v, unsigned long va);
    void          (*update_cr3            )(struct vcpu *v);
    int           (*map_and_validate_gl1e )(struct vcpu *v, mfn_t gmfn,
                                            void *new_guest_entry, u32 size);
    int           (*map_and_validate_gl2e )(struct vcpu *v, mfn_t gmfn,
                                            void *new_guest_entry, u32 size);
    int           (*map_and_validate_gl2he)(struct vcpu *v, mfn_t gmfn,
                                            void *new_guest_entry, u32 size);
    int           (*map_and_validate_gl3e )(struct vcpu *v, mfn_t gmfn,
                                            void *new_guest_entry, u32 size);
    int           (*map_and_validate_gl4e )(struct vcpu *v, mfn_t gmfn,
                                            void *new_guest_entry, u32 size);
    void          (*detach_old_tables     )(struct vcpu *v);
    int           (*x86_emulate_write     )(struct vcpu *v, unsigned long va,
                                            void *src, u32 bytes,
                                            struct x86_emulate_ctxt *ctxt);
    int           (*x86_emulate_cmpxchg   )(struct vcpu *v, unsigned long va,
                                            unsigned long old,
                                            unsigned long new,
                                            unsigned int bytes,
                                            struct x86_emulate_ctxt *ctxt);
    int           (*x86_emulate_cmpxchg8b )(struct vcpu *v, unsigned long va,
                                            unsigned long old_lo,
                                            unsigned long old_hi,
                                            unsigned long new_lo,
                                            unsigned long new_hi,
                                            struct x86_emulate_ctxt *ctxt);
    mfn_t         (*make_monitor_table    )(struct vcpu *v);
    void          (*destroy_monitor_table )(struct vcpu *v, mfn_t mmfn);
#if SHADOW2_OPTIMIZATIONS & SH2OPT_WRITABLE_HEURISTIC
    int           (*guess_wrmap           )(struct vcpu *v,
                                            unsigned long vaddr, mfn_t gmfn);
#endif
    /* For outsiders to tell what mode we're in */
    unsigned int shadow_levels;
    unsigned int guest_levels;
};

static inline int shadow2_guest_paging_levels(struct vcpu *v)
{
    ASSERT(v->arch.shadow2.mode != NULL);
    return v->arch.shadow2.mode->guest_levels;
}
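
/* Illustrative sketch (editorial, not in the original header): each paging
 * mode supplies one of these tables, and callers dispatch through
 * v->arch.shadow2.mode rather than switching on the guest type, e.g.
 *
 *   struct shadow2_paging_mode *m = v->arch.shadow2.mode;
 *   unsigned long gpa = m->gva_to_gpa(v, va);   // mode-specific translation
 *   if ( m->guest_levels == 2 )
 *       // guest is using 2-level (non-PAE 32-bit) pagetables
 */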

/**************************************************************************/
/* Entry points into the shadow code */

/* Turning on shadow2 test mode */
int shadow2_test_enable(struct domain *d);

/* Handler for shadow control ops: enabling and disabling shadow modes,
 * and log-dirty bitmap ops all happen through here. */
int shadow2_control_op(struct domain *d,
                       dom0_shadow_control_t *sc,
                       XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op);

/* Call when destroying a domain */
void shadow2_teardown(struct domain *d);

/* Call once all of the references to the domain have gone away */
void shadow2_final_teardown(struct domain *d);


/* Mark a page as dirty in the bitmap */
void sh2_do_mark_dirty(struct domain *d, mfn_t gmfn);
static inline void mark_dirty(struct domain *d, unsigned long gmfn)
{
    if ( shadow2_mode_log_dirty(d) )
    {
        shadow2_lock(d);
        sh2_do_mark_dirty(d, _mfn(gmfn));
        shadow2_unlock(d);
    }
}

/* Internal version, for when the shadow lock is already held */
static inline void sh2_mark_dirty(struct domain *d, mfn_t gmfn)
{
    ASSERT(shadow2_lock_is_acquired(d));
    if ( shadow2_mode_log_dirty(d) )
        sh2_do_mark_dirty(d, gmfn);
}
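
/* Illustrative note (editorial, not in the original header): the two wrappers
 * differ in locking and in the type they take, so the choice depends on the
 * caller's context, e.g.
 *
 *   mark_dirty(d, gmfn);             // raw frame number, lock not yet held
 *
 *   shadow2_lock(d);
 *   sh2_mark_dirty(d, _mfn(gmfn));   // mfn_t, from inside a critical section
 *   shadow2_unlock(d);
 *
 * Both are no-ops unless the domain is in log-dirty mode. */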

static inline int
shadow2_fault(unsigned long va, struct cpu_user_regs *regs)
/* Called from pagefault handler in Xen, and from the HVM trap handlers
 * for pagefaults.  Returns 1 if this fault was an artefact of the
 * shadow code (and the guest should retry) or 0 if it is not (and the
 * fault should be handled elsewhere or passed to the guest). */
{
    struct vcpu *v = current;
    perfc_incrc(shadow2_fault);
    return v->arch.shadow2.mode->page_fault(v, va, regs);
}
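
/* Illustrative sketch (editorial, not in the original header) of the expected
 * call pattern in a fault handler:
 *
 *   if ( shadow2_fault(addr, regs) )
 *       return;   // the shadow code fixed things up; let the guest retry
 *   // otherwise fall through to normal Xen/guest fault handling
 */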

static inline int
shadow2_invlpg(struct vcpu *v, unsigned long va)
/* Called when the guest requests an invlpg.  Returns 1 if the invlpg
 * instruction should be issued on the hardware, or 0 if it's safe not
 * to do so. */
{
    return v->arch.shadow2.mode->invlpg(v, va);
}

static inline unsigned long
shadow2_gva_to_gpa(struct vcpu *v, unsigned long va)
/* Called to translate a guest virtual address to what the *guest*
 * pagetables would map it to. */
{
    return v->arch.shadow2.mode->gva_to_gpa(v, va);
}

static inline unsigned long
shadow2_gva_to_gfn(struct vcpu *v, unsigned long va)
/* Called to translate a guest virtual address to what the *guest*
 * pagetables would map it to. */
{
    return v->arch.shadow2.mode->gva_to_gfn(v, va);
}

static inline void
shadow2_update_cr3(struct vcpu *v)
/* Updates all the things that are derived from the guest's CR3.
 * Called when the guest changes CR3. */
{
    shadow2_lock(v->domain);
    v->arch.shadow2.mode->update_cr3(v);
    shadow2_unlock(v->domain);
}

/* Should be called after CR3 is updated.
 * Updates vcpu->arch.cr3 and, for HVM guests, vcpu->arch.hvm_vcpu.cpu_cr3.
 *
 * Also updates other state derived from CR3 (vcpu->arch.guest_vtable,
 * shadow_vtable, etc).
 *
 * Uses values found in vcpu->arch.(guest_table and guest_table_user), and
 * for HVM guests, arch.monitor_table and hvm's guest CR3.
 *
 * Updates ref counts on shadow tables appropriately.
 * For PAE, relocates L3 entries, if necessary, into low memory.
 */
static inline void update_cr3(struct vcpu *v)
{
    unsigned long cr3_mfn = 0;

    if ( shadow2_mode_enabled(v->domain) )
    {
        shadow2_update_cr3(v);
        return;
    }

#if CONFIG_PAGING_LEVELS == 4
    if ( !(v->arch.flags & TF_kernel_mode) )
        cr3_mfn = pagetable_get_pfn(v->arch.guest_table_user);
    else
#endif
        cr3_mfn = pagetable_get_pfn(v->arch.guest_table);

    make_cr3(v, cr3_mfn);
}

extern void sh2_update_paging_modes(struct vcpu *v);

/* Should be called to initialise paging structures if the paging mode
 * has changed, and when bringing up a VCPU for the first time. */
static inline void shadow2_update_paging_modes(struct vcpu *v)
{
    ASSERT(shadow2_mode_enabled(v->domain));
    shadow2_lock(v->domain);
    sh2_update_paging_modes(v);
    shadow2_unlock(v->domain);
}

static inline void
shadow2_detach_old_tables(struct vcpu *v)
{
    if ( v->arch.shadow2.mode )
        v->arch.shadow2.mode->detach_old_tables(v);
}

static inline mfn_t
shadow2_make_monitor_table(struct vcpu *v)
{
    return v->arch.shadow2.mode->make_monitor_table(v);
}

static inline void
shadow2_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
{
    v->arch.shadow2.mode->destroy_monitor_table(v, mmfn);
}

/* Validate a pagetable change from the guest and update the shadows. */
extern int shadow2_validate_guest_entry(struct vcpu *v, mfn_t gmfn,
                                        void *new_guest_entry);

/* Update the shadows in response to a pagetable write from an HVM guest */
extern void shadow2_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn,
                                            void *entry, u32 size);

/* Remove all writeable mappings of a guest frame from the shadows.
 * Returns non-zero if we need to flush TLBs.
 * level and fault_addr describe how we found this to be a pagetable;
 * level==0 means we have some other reason for revoking write access. */
extern int shadow2_remove_write_access(struct vcpu *v, mfn_t readonly_mfn,
                                       unsigned int level,
                                       unsigned long fault_addr);

/* Remove all mappings of the guest mfn from the shadows.
 * Returns non-zero if we need to flush TLBs. */
extern int shadow2_remove_all_mappings(struct vcpu *v, mfn_t target_mfn);

/* This is an HVM page that we think is no longer a pagetable.
 * Unshadow it, and recursively unshadow pages that reference it. */
void
shadow2_remove_all_shadows_and_parents(struct vcpu *v, mfn_t gmfn);

/* Remove all shadows of the guest mfn. */
extern void sh2_remove_shadows(struct vcpu *v, mfn_t gmfn, int all);
static inline void shadow2_remove_all_shadows(struct vcpu *v, mfn_t gmfn)
{
    sh2_remove_shadows(v, gmfn, 1);
}

/* Add a page to a domain */
void
shadow2_guest_physmap_add_page(struct domain *d, unsigned long gfn,
                               unsigned long mfn);

/* Remove a page from a domain */
void
shadow2_guest_physmap_remove_page(struct domain *d, unsigned long gfn,
                                  unsigned long mfn);

/*
 * Definitions for the shadow2_flags field in page_info.
 * These flags are stored on *guest* pages...
 * Bits 1-13 are encodings for the shadow types.
 */
#define PGC_SH2_type_to_index(_type) ((_type) >> PGC_SH2_type_shift)
#define SH2F_page_type_mask \
    (((1u << (PGC_SH2_type_to_index(PGC_SH2_max_shadow) + 1u)) - 1u) - \
     ((1u << PGC_SH2_type_to_index(PGC_SH2_min_shadow)) - 1u))

#define SH2F_L1_32   (1u << PGC_SH2_type_to_index(PGC_SH2_l1_32_shadow))
#define SH2F_FL1_32  (1u << PGC_SH2_type_to_index(PGC_SH2_fl1_32_shadow))
#define SH2F_L2_32   (1u << PGC_SH2_type_to_index(PGC_SH2_l2_32_shadow))
#define SH2F_L1_PAE  (1u << PGC_SH2_type_to_index(PGC_SH2_l1_pae_shadow))
#define SH2F_FL1_PAE (1u << PGC_SH2_type_to_index(PGC_SH2_fl1_pae_shadow))
#define SH2F_L2_PAE  (1u << PGC_SH2_type_to_index(PGC_SH2_l2_pae_shadow))
#define SH2F_L2H_PAE (1u << PGC_SH2_type_to_index(PGC_SH2_l2h_pae_shadow))
#define SH2F_L3_PAE  (1u << PGC_SH2_type_to_index(PGC_SH2_l3_pae_shadow))
#define SH2F_L1_64   (1u << PGC_SH2_type_to_index(PGC_SH2_l1_64_shadow))
#define SH2F_FL1_64  (1u << PGC_SH2_type_to_index(PGC_SH2_fl1_64_shadow))
#define SH2F_L2_64   (1u << PGC_SH2_type_to_index(PGC_SH2_l2_64_shadow))
#define SH2F_L3_64   (1u << PGC_SH2_type_to_index(PGC_SH2_l3_64_shadow))
#define SH2F_L4_64   (1u << PGC_SH2_type_to_index(PGC_SH2_l4_64_shadow))

/* Used for hysteresis when automatically unhooking mappings on fork/exit */
#define SH2F_unhooked_mappings (1u<<31)

/*
 * Allocation of shadow pages
 */

/* Return the minimum acceptable number of shadow pages a domain needs */
unsigned int shadow2_min_acceptable_pages(struct domain *d);

/* Set the pool of shadow pages to the required number of MB.
 * Input will be rounded up to at least shadow2_min_acceptable_pages().
 * Returns 0 for success, 1 for failure. */
unsigned int shadow2_set_allocation(struct domain *d,
                                    unsigned int megabytes,
                                    int *preempted);

/* Return the size of the shadow2 pool, rounded up to the nearest MB */
static inline unsigned int shadow2_get_allocation(struct domain *d)
{
    unsigned int pg = d->arch.shadow2.total_pages;
    return ((pg >> (20 - PAGE_SHIFT))
            + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
}
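
/* Worked example (editorial, not in the original source): with 4KiB pages,
 * PAGE_SHIFT is 12, so (20 - PAGE_SHIFT) = 8 and one MB is 256 pages.
 * A pool of 300 pages therefore reports
 *
 *   (300 >> 8) + ((300 & 255) ? 1 : 0)  =  1 + 1  =  2 MB
 *
 * i.e. the conversion rounds up rather than truncating. */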

/*
 * Linked list for chaining entries in the shadow hash table.
 */
struct shadow2_hash_entry {
    struct shadow2_hash_entry *next;
    mfn_t smfn;                 /* MFN of the shadow */
#ifdef __x86_64__ /* Shorten 'n' so we don't waste a whole word on storing 't' */
    unsigned long n:56;         /* MFN of guest PT or GFN of guest superpage */
#else
    unsigned long n;            /* MFN of guest PT or GFN of guest superpage */
#endif
    unsigned char t;            /* shadow type bits, or 0 for empty */
};

#define SHADOW2_HASH_BUCKETS 251
/* Other possibly useful primes are 509, 1021, 2039, 4093, 8191, 16381 */


#if SHADOW2_OPTIMIZATIONS & SH2OPT_CACHE_WALKS
/* Optimization: cache the results of guest walks.  This helps with MMIO
 * and emulated writes, which tend to issue very similar walk requests
 * repeatedly.  We keep the results of the last few walks, and blow
 * away the cache on guest cr3 write, mode change, or page fault. */

#define SH2_WALK_CACHE_ENTRIES 4

/* Rather than cache a guest walk, which would include mapped pointers
 * to pages, we cache what a TLB would remember about the walk: the
 * permissions and the l1 gfn */
struct shadow2_walk_cache {
    unsigned long va;           /* The virtual address (or 0 == unused) */
    unsigned long gfn;          /* The gfn from the effective l1e */
    u32 permissions;            /* The aggregated permission bits */
};
#endif

/**************************************************************************/
/* Guest physmap (p2m) support */

/* Walk another domain's P2M table, mapping pages as we go */
extern mfn_t
sh2_gfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);


/* General conversion function from gfn to mfn */
static inline mfn_t
sh2_gfn_to_mfn(struct domain *d, unsigned long gfn)
{
    if ( !shadow2_mode_translate(d) )
        return _mfn(gfn);
    else if ( likely(current->domain == d) )
        return _mfn(get_mfn_from_gpfn(gfn));
    else
        return sh2_gfn_to_mfn_foreign(d, gfn);
}

// vcpu-specific version of gfn_to_mfn().  This is where we hide the dirty
// little secret that, for hvm guests with paging disabled, nearly all of the
// shadow code actually thinks that the guest is running on *untranslated*
// page tables (which is actually domain->phys_table).
//
static inline mfn_t
sh2_vcpu_gfn_to_mfn(struct vcpu *v, unsigned long gfn)
{
    if ( !shadow2_vcpu_mode_translate(v) )
        return _mfn(gfn);
    if ( likely(current->domain == v->domain) )
        return _mfn(get_mfn_from_gpfn(gfn));
    return sh2_gfn_to_mfn_foreign(v->domain, gfn);
}

static inline unsigned long
sh2_mfn_to_gfn(struct domain *d, mfn_t mfn)
{
    if ( shadow2_mode_translate(d) )
        return get_gpfn_from_mfn(mfn_x(mfn));
    else
        return mfn_x(mfn);
}
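
/* Illustrative note (editorial, not in the original header): which converter
 * to use depends on whether per-vcpu paging state matters, e.g.
 *
 *   mfn_t m1 = sh2_gfn_to_mfn(d, gfn);        // domain-level view (p2m or identity)
 *   mfn_t m2 = sh2_vcpu_gfn_to_mfn(v, gfn);   // honours a vcpu with paging disabled
 *
 * In non-translated (PV) mode both simply return _mfn(gfn). */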

#endif /* _XEN_SHADOW2_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */