ia64/xen-unstable
changeset 13138:caa1987679bd
[XEN] Skip shadowing of guest PTE writes when known to be safe
That is, when the guest replaces a not-present pte with another one
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
| author | Tim Deegan <Tim.Deegan@xensource.com> |
|---|---|
| date | Wed Dec 20 11:54:57 2006 +0000 (2006-12-20) |
| parents | f7a2cd8b0a8e |
| children | e2fcb70bec59 |
| files | xen/arch/x86/mm/shadow/multi.c xen/arch/x86/mm/shadow/private.h xen/include/asm-x86/shadow.h |
line diff
```diff
--- a/xen/arch/x86/mm/shadow/multi.c	Wed Dec 20 11:53:01 2006 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c	Wed Dec 20 11:54:57 2006 +0000
@@ -3839,12 +3839,43 @@ static inline void * emulate_map_dest(st
         return NULL;
 }
 
+static int safe_not_to_verify_write(mfn_t gmfn, void *dst, void *src,
+                                    int bytes)
+{
+#if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY)
+    struct page_info *pg = mfn_to_page(gmfn);
+    if ( !(pg->shadow_flags & SHF_32)
+         && bytes == 8
+         && ((unsigned long)dst & 7) == 0 )
+    {
+        /* Not shadowed 32-bit: aligned 64-bit writes that leave the
+         * present bit unset are safe to ignore. */
+        if ( (*(u64*)src & _PAGE_PRESENT) == 0
+             && (*(u64*)dst & _PAGE_PRESENT) == 0 )
+            return 1;
+    }
+    else if ( !(pg->shadow_flags & (SHF_PAE|SHF_64))
+              && bytes == 4
+              && ((unsigned long)dst & 3) == 0 )
+    {
+        /* Not shadowed PAE/64-bit: aligned 32-bit writes that leave the
+         * present bit unset are safe to ignore. */
+        if ( (*(u32*)src & _PAGE_PRESENT) == 0
+             && (*(u32*)dst & _PAGE_PRESENT) == 0 )
+            return 1;
+    }
+#endif
+    return 0;
+}
+
+
 int
 sh_x86_emulate_write(struct vcpu *v, unsigned long vaddr, void *src,
                      u32 bytes, struct sh_emulate_ctxt *sh_ctxt)
 {
     mfn_t mfn;
     void *addr;
+    int skip;
 
     if ( vaddr & (bytes-1) )
         return X86EMUL_UNHANDLEABLE;
@@ -3855,8 +3886,9 @@ sh_x86_emulate_write(struct vcpu *v, uns
     if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
         return X86EMUL_PROPAGATE_FAULT;
 
+    skip = safe_not_to_verify_write(mfn, addr, src, bytes);
     memcpy(addr, src, bytes);
-    shadow_validate_guest_pt_write(v, mfn, addr, bytes);
+    if ( !skip ) shadow_validate_guest_pt_write(v, mfn, addr, bytes);
 
     /* If we are writing zeros to this page, might want to unshadow */
     if ( likely(bytes >= 4) && (*(u32 *)addr == 0) )
@@ -3875,7 +3907,7 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     mfn_t mfn;
     void *addr;
     unsigned long prev;
-    int rv = X86EMUL_CONTINUE;
+    int rv = X86EMUL_CONTINUE, skip;
 
     ASSERT(shadow_locked_by_me(v->domain));
     ASSERT(bytes <= sizeof(unsigned long));
@@ -3886,6 +3918,8 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
         return X86EMUL_PROPAGATE_FAULT;
 
+    skip = safe_not_to_verify_write(mfn, &new, &old, bytes);
+
     switch ( bytes )
     {
     case 1: prev = cmpxchg(((u8 *)addr), old, new); break;
@@ -3898,7 +3932,9 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     }
 
     if ( prev == old )
-        shadow_validate_guest_pt_write(v, mfn, addr, bytes);
+    {
+        if ( !skip ) shadow_validate_guest_pt_write(v, mfn, addr, bytes);
+    }
     else
         rv = X86EMUL_CMPXCHG_FAILED;
 
@@ -3924,7 +3960,7 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
     mfn_t mfn;
     void *addr;
     u64 old, new, prev;
-    int rv = X86EMUL_CONTINUE;
+    int rv = X86EMUL_CONTINUE, skip;
 
     ASSERT(shadow_locked_by_me(v->domain));
 
@@ -3936,10 +3972,13 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
 
     old = (((u64) old_hi) << 32) | (u64) old_lo;
     new = (((u64) new_hi) << 32) | (u64) new_lo;
+    skip = safe_not_to_verify_write(mfn, &new, &old, 8);
     prev = cmpxchg(((u64 *)addr), old, new);
 
     if ( prev == old )
-        shadow_validate_guest_pt_write(v, mfn, addr, 8);
+    {
+        if ( !skip ) shadow_validate_guest_pt_write(v, mfn, addr, 8);
+    }
     else
         rv = X86EMUL_CMPXCHG_FAILED;
```
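The heart of the new check is a present-bit comparison between the entry being overwritten and the entry being written: if neither is marked present, no shadow entry can be backed by either value, so revalidation is skipped. A minimal standalone sketch of that decision, with illustrative PTE values and simplified types (the real function additionally consults the page's shadow_flags and the write's size and alignment, as shown in the diff above):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: in Xen this flag comes from asm-x86/page.h. */
#define _PAGE_PRESENT 0x1UL

/* Simplified model of safe_not_to_verify_write(): a PTE write may be
 * left unverified when both the old and the new entry have the
 * present bit clear, since neither can have a live shadow entry. */
static int safe_not_to_verify(uint64_t old_pte, uint64_t new_pte)
{
    return (old_pte & _PAGE_PRESENT) == 0
        && (new_pte & _PAGE_PRESENT) == 0;
}

int main(void)
{
    /* not-present -> not-present: shadow revalidation can be skipped */
    printf("%d\n", safe_not_to_verify(0x0, 0xdead0000));  /* prints 1 */
    /* not-present -> present: shadow must be revalidated */
    printf("%d\n", safe_not_to_verify(0x0, 0xdead0001));  /* prints 0 */
    return 0;
}
```

Note that the guest's write itself still happens unconditionally (the memcpy/cmpxchg above); only the call into shadow_validate_guest_pt_write() is elided.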
```diff
--- a/xen/arch/x86/mm/shadow/private.h	Wed Dec 20 11:53:01 2006 +0000
+++ b/xen/arch/x86/mm/shadow/private.h	Wed Dec 20 11:54:57 2006 +0000
@@ -249,6 +249,10 @@ static inline int sh_type_is_pinnable(st
 #define SHF_L3_64   (1u << SH_type_l3_64_shadow)
 #define SHF_L4_64   (1u << SH_type_l4_64_shadow)
 
+#define SHF_32  (SHF_L1_32|SHF_FL1_32|SHF_L2_32)
+#define SHF_PAE (SHF_L1_PAE|SHF_FL1_PAE|SHF_L2_PAE|SHF_L2H_PAE)
+#define SHF_64  (SHF_L1_64|SHF_FL1_64|SHF_L2_64|SHF_L3_64|SHF_L4_64)
+
 /* Used for hysteresis when automatically unhooking mappings on fork/exit */
 #define SHF_unhooked_mappings (1u<<31)
```
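These three masks group the per-shadow-type flag bits by guest paging mode, so a caller can ask "is this page shadowed as a 32-bit pagetable at all?" with a single AND against page_info's shadow_flags, which is exactly how safe_not_to_verify_write() uses them above. A sketch of that pattern, with made-up bit positions (the real values come from the SH_type_* enumeration in private.h):

```c
#include <stdio.h>

/* Hypothetical bit positions, for illustration only. */
#define SHF_L1_32  (1u << 1)
#define SHF_FL1_32 (1u << 2)
#define SHF_L2_32  (1u << 3)
#define SHF_32     (SHF_L1_32 | SHF_FL1_32 | SHF_L2_32)

int main(void)
{
    unsigned int shadow_flags = SHF_L2_32;  /* shadowed as a 32-bit L2 */

    if ( shadow_flags & SHF_32 )
        printf("page has at least one 32-bit shadow\n");
    else
        printf("page has no 32-bit shadows\n");
    return 0;
}
```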
```diff
--- a/xen/include/asm-x86/shadow.h	Wed Dec 20 11:53:01 2006 +0000
+++ b/xen/include/asm-x86/shadow.h	Wed Dec 20 11:54:57 2006 +0000
@@ -159,8 +159,9 @@ extern int shadow_audit_enable;
 #define SHOPT_FAST_FAULT_PATH     0x04  /* Fast-path MMIO and not-present */
 #define SHOPT_PREFETCH            0x08  /* Shadow multiple entries per fault */
 #define SHOPT_LINUX_L3_TOPLEVEL   0x10  /* Pin l3es on early 64bit linux */
+#define SHOPT_SKIP_VERIFY         0x20  /* Skip PTE v'fy when safe to do so */
 
-#define SHADOW_OPTIMIZATIONS      0x1f
+#define SHADOW_OPTIMIZATIONS      0x3f
 
 
 /* With shadow pagetables, the different kinds of address start
```
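Each SHOPT_* bit doubles as a compile-time switch: SHADOW_OPTIMIZATIONS is the mask of optimizations built into the hypervisor, so widening it from 0x1f to 0x3f turns the new optimization on by default, while clearing its bit compiles the fast path out entirely. The gating pattern, as used by safe_not_to_verify_write() in multi.c above (values copied from this patch):

```c
#define SHOPT_SKIP_VERIFY     0x20  /* from shadow.h */
#define SHADOW_OPTIMIZATIONS  0x3f  /* set to 0x1f to build without it */

#if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY)
/* Skip-verify fast path compiled in: the present-bit checks run here. */
#else
/* Otherwise the function falls through and always returns 0,
 * i.e. every PTE write is verified as before. */
#endif
```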