ia64/xen-unstable
changeset 18825:285f8635f573
shadow: fix race between resync and page promotion.
This patch fixes a (hopefully) rare-occurring problem causing memory
corruption in 64 bit guests (and assertion failures in debug build
xen).
On a pagefault, set_l{3,4}e's resync_all was able to give write access
to a page in the guest still not shadowed.
Signed-off-by: Gianluca Guida <gianluca.guida@eu.citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Mon Nov 24 11:12:20 2008 +0000 (2008-11-24) |
parents | 0b8c6c91c5a4 |
children | 612218519cb5 |
files | xen/arch/x86/mm/shadow/multi.c |
line diff
1.1 --- a/xen/arch/x86/mm/shadow/multi.c Mon Nov 24 11:11:20 2008 +0000 1.2 +++ b/xen/arch/x86/mm/shadow/multi.c Mon Nov 24 11:12:20 2008 +0000 1.3 @@ -866,9 +866,6 @@ static int shadow_set_l4e(struct vcpu *v 1.4 domain_crash(v->domain); 1.5 return SHADOW_SET_ERROR; 1.6 } 1.7 -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) 1.8 - shadow_resync_all(v, 0); 1.9 -#endif 1.10 } 1.11 1.12 /* Write the new entry */ 1.13 @@ -914,9 +911,6 @@ static int shadow_set_l3e(struct vcpu *v 1.14 domain_crash(v->domain); 1.15 return SHADOW_SET_ERROR; 1.16 } 1.17 -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) 1.18 - shadow_resync_all(v, 0); 1.19 -#endif 1.20 } 1.21 1.22 /* Write the new entry */ 1.23 @@ -1716,7 +1710,8 @@ static shadow_l4e_t * shadow_get_and_cre 1.24 static shadow_l3e_t * shadow_get_and_create_l3e(struct vcpu *v, 1.25 walk_t *gw, 1.26 mfn_t *sl3mfn, 1.27 - fetch_type_t ft) 1.28 + fetch_type_t ft, 1.29 + int *resync) 1.30 { 1.31 mfn_t sl4mfn; 1.32 shadow_l4e_t *sl4e; 1.33 @@ -1746,6 +1741,11 @@ static shadow_l3e_t * shadow_get_and_cre 1.34 ASSERT((r & SHADOW_SET_FLUSH) == 0); 1.35 if ( r & SHADOW_SET_ERROR ) 1.36 return NULL; 1.37 + 1.38 +#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) 1.39 + *resync |= 1; 1.40 +#endif 1.41 + 1.42 } 1.43 /* Now follow it down a level. Guaranteed to succeed. */ 1.44 return sh_linear_l3_table(v) + shadow_l3_linear_offset(gw->va); 1.45 @@ -1756,14 +1756,15 @@ static shadow_l3e_t * shadow_get_and_cre 1.46 static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v, 1.47 walk_t *gw, 1.48 mfn_t *sl2mfn, 1.49 - fetch_type_t ft) 1.50 + fetch_type_t ft, 1.51 + int *resync) 1.52 { 1.53 #if GUEST_PAGING_LEVELS >= 4 /* 64bit... */ 1.54 mfn_t sl3mfn = _mfn(INVALID_MFN); 1.55 shadow_l3e_t *sl3e; 1.56 if ( !mfn_valid(gw->l2mfn) ) return NULL; /* No guest page. 
*/ 1.57 /* Get the l3e */ 1.58 - sl3e = shadow_get_and_create_l3e(v, gw, &sl3mfn, ft); 1.59 + sl3e = shadow_get_and_create_l3e(v, gw, &sl3mfn, ft, resync); 1.60 if ( sl3e == NULL ) return NULL; 1.61 if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT ) 1.62 { 1.63 @@ -1795,6 +1796,11 @@ static shadow_l2e_t * shadow_get_and_cre 1.64 ASSERT((r & SHADOW_SET_FLUSH) == 0); 1.65 if ( r & SHADOW_SET_ERROR ) 1.66 return NULL; 1.67 + 1.68 +#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) 1.69 + *resync |= 1; 1.70 +#endif 1.71 + 1.72 } 1.73 /* Now follow it down a level. Guaranteed to succeed. */ 1.74 return sh_linear_l2_table(v) + shadow_l2_linear_offset(gw->va); 1.75 @@ -1827,11 +1833,13 @@ static shadow_l1e_t * shadow_get_and_cre 1.76 fetch_type_t ft) 1.77 { 1.78 mfn_t sl2mfn; 1.79 + int resync = 0; 1.80 shadow_l2e_t *sl2e; 1.81 1.82 /* Get the l2e */ 1.83 - sl2e = shadow_get_and_create_l2e(v, gw, &sl2mfn, ft); 1.84 + sl2e = shadow_get_and_create_l2e(v, gw, &sl2mfn, ft, &resync); 1.85 if ( sl2e == NULL ) return NULL; 1.86 + 1.87 /* Install the sl1 in the l2e if it wasn't there or if we need to 1.88 * re-do it to fix a PSE dirty bit. */ 1.89 if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT 1.90 @@ -1877,6 +1885,14 @@ static shadow_l1e_t * shadow_get_and_cre 1.91 ASSERT((r & SHADOW_SET_FLUSH) == 0); 1.92 if ( r & SHADOW_SET_ERROR ) 1.93 return NULL; 1.94 + 1.95 +#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) 1.96 + /* All pages walked are now pagetables. Safe to resync pages 1.97 + in case level 4 or 3 shadows were set. */ 1.98 + if ( resync ) 1.99 + shadow_resync_all(v, 0); 1.100 +#endif 1.101 + 1.102 /* This next line is important: in 32-on-PAE and 32-on-64 modes, 1.103 * the guest l1 table has an 8k shadow, and we need to return 1.104 * the right mfn of the pair. 
This call will set it for us as a 1.105 @@ -2158,6 +2174,10 @@ static int validate_gl4e(struct vcpu *v, 1.106 sl3mfn = get_shadow_status(v, gl3mfn, SH_type_l3_shadow); 1.107 else 1.108 result |= SHADOW_SET_ERROR; 1.109 + 1.110 +#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) 1.111 + shadow_resync_all(v, 0); 1.112 +#endif 1.113 } 1.114 l4e_propagate_from_guest(v, new_gl4e, sl3mfn, &new_sl4e, ft_prefetch); 1.115 1.116 @@ -2210,6 +2230,10 @@ static int validate_gl3e(struct vcpu *v, 1.117 sl2mfn = get_shadow_status(v, gl2mfn, SH_type_l2_shadow); 1.118 else 1.119 result |= SHADOW_SET_ERROR; 1.120 + 1.121 +#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC ) 1.122 + shadow_resync_all(v, 0); 1.123 +#endif 1.124 } 1.125 l3e_propagate_from_guest(v, new_gl3e, sl2mfn, &new_sl3e, ft_prefetch); 1.126 result |= shadow_set_l3e(v, sl3p, new_sl3e, sl3mfn);