ia64/xen-unstable
changeset 13137:f7a2cd8b0a8e
[XEN] Better diagnostics of recursive shadow faults
Give a trace of the recursive fault instead of BUG()ing in the shadow handler.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author    Tim Deegan <Tim.Deegan@xensource.com>
date      Wed Dec 20 11:53:01 2006 +0000 (2006-12-20)
parents   e99ba0c6c046
children  caa1987679bd
files     xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/multi.c xen/include/asm-x86/shadow.h
line diff
--- a/xen/arch/x86/mm/shadow/common.c  Wed Dec 20 11:39:22 2006 +0000
+++ b/xen/arch/x86/mm/shadow/common.c   Wed Dec 20 11:53:01 2006 +0000
@@ -553,7 +553,7 @@ shadow_validate_guest_entry(struct vcpu
 {
     int rc;
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
     rc = __shadow_validate_guest_entry(v, gmfn, entry, sizeof(l1_pgentry_t));
     shadow_audit_tables(v);
     return rc;
@@ -569,7 +569,7 @@ shadow_validate_guest_pt_write(struct vc
     struct domain *d = v->domain;
     int rc;
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
     rc = __shadow_validate_guest_entry(v, gmfn, entry, size);
     if ( rc & SHADOW_SET_FLUSH )
         /* Need to flush TLBs to pick up shadow PT changes */
@@ -858,7 +858,7 @@ mfn_t shadow_alloc(struct domain *d,
     void *p;
     int i;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(order <= SHADOW_MAX_ORDER);
     ASSERT(shadow_type != SH_type_none);
     perfc_incrc(shadow_alloc);
@@ -928,7 +928,7 @@ void shadow_free(struct domain *d, mfn_t
     unsigned long mask;
     int i;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     perfc_incrc(shadow_free);
 
     shadow_type = sp->type;
@@ -997,7 +997,7 @@ shadow_alloc_p2m_pages(struct domain *d)
 {
     struct page_info *pg;
     u32 i;
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
 
     if ( d->arch.shadow.total_pages
          < (shadow_min_acceptable_pages(d) + (1<<SHADOW_MAX_ORDER)) )
@@ -1143,7 +1143,7 @@ p2m_next_level(struct domain *d, mfn_t *
             p2m_install_entry_in_monitors(d, (l3_pgentry_t *)p2m_entry);
            /* Also, any vcpus running on shadows of the p2m need to
             * reload their CR3s so the change propagates to the shadow */
-           ASSERT(shadow_lock_is_acquired(d));
+           ASSERT(shadow_locked_by_me(d));
            for_each_vcpu(d, v)
            {
                if ( pagetable_get_pfn(v->arch.guest_table)
@@ -1435,7 +1435,7 @@ static unsigned int set_sh_allocation(st
     unsigned int lower_bound;
     int j;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
 
     /* Don't allocate less than the minimum acceptable, plus one page per
      * megabyte of RAM (for the p2m table) */
@@ -1614,7 +1614,7 @@ static int shadow_hash_alloc(struct doma
 {
     struct shadow_page_info **table;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(!d->arch.shadow.hash_table);
 
     table = xmalloc_array(struct shadow_page_info *, SHADOW_HASH_BUCKETS);
@@ -1629,7 +1629,7 @@ static int shadow_hash_alloc(struct doma
  * This function does not care whether the table is populated. */
 static void shadow_hash_teardown(struct domain *d)
 {
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(d->arch.shadow.hash_table);
 
     xfree(d->arch.shadow.hash_table);
@@ -1645,7 +1645,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
     struct shadow_page_info *sp, *prev;
     key_t key;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(d->arch.shadow.hash_table);
     ASSERT(t);
 
@@ -1699,7 +1699,7 @@ void shadow_hash_insert(struct vcpu *v,
     struct shadow_page_info *sp;
     key_t key;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(d->arch.shadow.hash_table);
     ASSERT(t);
 
@@ -1725,7 +1725,7 @@ void shadow_hash_delete(struct vcpu *v,
     struct shadow_page_info *sp, *x;
     key_t key;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(d->arch.shadow.hash_table);
     ASSERT(t);
 
@@ -1780,7 +1780,7 @@ static void hash_foreach(struct vcpu *v,
     struct shadow_page_info *x;
 
     /* Say we're here, to stop hash-lookups reordering the chains */
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(d->arch.shadow.hash_walking == 0);
     d->arch.shadow.hash_walking = 1;
 
@@ -1937,7 +1937,7 @@ int shadow_remove_write_access(struct vc
         ;
     struct page_info *pg = mfn_to_page(gmfn);
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
 
     /* Only remove writable mappings if we are doing shadow refcounts.
      * In guest refcounting, we trust Xen to already be restricting
@@ -2129,7 +2129,7 @@ int shadow_remove_all_mappings(struct vc
     if ( (page->count_info & PGC_count_mask) == 0 )
         return 0;
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
 
     /* XXX TODO:
      * Heuristics for finding the (probably) single mapping of this gmfn */
@@ -2296,7 +2296,7 @@ void sh_remove_shadows(struct vcpu *v, m
         0 /* unused */
     };
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
     ASSERT(!(all && fast));
 
     pg = mfn_to_page(gmfn);
@@ -2382,7 +2382,7 @@ void sh_update_paging_modes(struct vcpu
     struct shadow_paging_mode *old_mode = v->arch.shadow.mode;
     mfn_t old_guest_table;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
 
     // Valid transitions handled by this function:
     // - For PV guests:
@@ -2560,7 +2560,7 @@ static void sh_new_mode(struct domain *d
 {
     struct vcpu *v;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(d != current->domain);
     d->arch.shadow.mode = new_mode;
     if ( new_mode & SHM2_translate )
@@ -2661,7 +2661,7 @@ void shadow_teardown(struct domain *d)
     ASSERT(test_bit(_DOMF_dying, &d->domain_flags));
     ASSERT(d != current->domain);
 
-    if ( !shadow_lock_is_acquired(d) )
+    if ( !shadow_locked_by_me(d) )
         shadow_lock(d); /* Keep various asserts happy */
 
     if ( shadow_mode_enabled(d) )
@@ -2744,7 +2744,7 @@ void shadow_final_teardown(struct domain
 static int shadow_one_bit_enable(struct domain *d, u32 mode)
 /* Turn on a single shadow mode feature */
 {
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
 
     /* Sanity check the call */
     if ( d == current->domain || (d->arch.shadow.mode & mode) )
@@ -2773,7 +2773,7 @@ static int shadow_one_bit_disable(struct
 /* Turn off a single shadow mode feature */
 {
     struct vcpu *v;
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
 
     /* Sanity check the call */
     if ( d == current->domain || !(d->arch.shadow.mode & mode) )
@@ -3134,7 +3134,7 @@ void sh_do_mark_dirty(struct domain *d,
 {
     unsigned long pfn;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(shadow_mode_log_dirty(d));
 
     if ( !mfn_valid(gmfn) )
--- a/xen/arch/x86/mm/shadow/multi.c    Wed Dec 20 11:39:22 2006 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Wed Dec 20 11:53:01 2006 +0000
@@ -227,7 +227,7 @@ guest_supports_nx(struct vcpu *v)
 static inline int
 guest_walk_tables(struct vcpu *v, unsigned long va, walk_t *gw, int guest_op)
 {
-    ASSERT(!guest_op || shadow_lock_is_acquired(v->domain));
+    ASSERT(!guest_op || shadow_locked_by_me(v->domain));
 
     perfc_incrc(shadow_guest_walk);
     memset(gw, 0, sizeof(*gw));
@@ -442,7 +442,7 @@ static u32 guest_set_ad_bits(struct vcpu
 
     ASSERT(ep && !(((unsigned long)ep) & ((sizeof *ep) - 1)));
     ASSERT(level <= GUEST_PAGING_LEVELS);
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
 
     flags = guest_l1e_get_flags(*ep);
 
@@ -2657,6 +2657,18 @@ static int sh_page_fault(struct vcpu *v,
     }
 #endif /* SHOPT_FAST_FAULT_PATH */
 
+    /* Detect if this page fault happened while we were already in Xen
+     * doing a shadow operation.  If that happens, the only thing we can
+     * do is let Xen's normal fault handlers try to fix it.  In any case,
+     * a diagnostic trace of the fault will be more useful than
+     * a BUG() when we try to take the lock again. */
+    if ( unlikely(shadow_locked_by_me(d)) )
+    {
+        SHADOW_ERROR("Recursive shadow fault: lock was taken by %s\n",
+                     d->arch.shadow.locker_function);
+        return 0;
+    }
+
     shadow_lock(d);
 
     shadow_audit_tables(v);
@@ -3343,7 +3355,7 @@ sh_update_cr3(struct vcpu *v)
     u32 guest_idx=0;
 #endif
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
     ASSERT(v->arch.shadow.mode);
 
     ////
@@ -3837,7 +3849,7 @@ sh_x86_emulate_write(struct vcpu *v, uns
     if ( vaddr & (bytes-1) )
         return X86EMUL_UNHANDLEABLE;
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
     ASSERT(((vaddr & ~PAGE_MASK) + bytes) <= PAGE_SIZE);
 
     if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
@@ -3865,7 +3877,7 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     unsigned long prev;
     int rv = X86EMUL_CONTINUE;
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
     ASSERT(bytes <= sizeof(unsigned long));
 
     if ( vaddr & (bytes-1) )
@@ -3914,7 +3926,7 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
     u64 old, new, prev;
     int rv = X86EMUL_CONTINUE;
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
 
     if ( vaddr & 7 )
         return X86EMUL_UNHANDLEABLE;
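The new early exit in sh_page_fault() above works because the shadow lock records which CPU holds it and which function took it, so a fault raised while the lock is already held by this CPU can be reported with SHADOW_ERROR() instead of deadlocking or hitting a BUG() when the lock is taken a second time. The following is a minimal standalone sketch of that pattern, not Xen code: the struct, the fixed this_cpu value, and the handler names are hypothetical stand-ins.

/* Sketch only: a lock that records its owner so a re-entrant fault
 * handler can detect recursion and log a diagnostic rather than
 * deadlocking or BUG()ing.  All names are illustrative stand-ins. */
#include <stdio.h>

struct owner_lock {
    int locker;                   /* CPU id of the holder, -1 when free */
    const char *locker_function;  /* who took it, kept for diagnostics */
};

static struct owner_lock shadow_lock_model = { -1, "nobody" };
static const int this_cpu = 0;    /* stand-in for current->processor */

static int locked_by_me(const struct owner_lock *l)
{
    return l->locker == this_cpu;
}

static void lock_take(struct owner_lock *l, const char *fn)
{
    /* A real implementation would spin on an underlying spinlock here. */
    l->locker = this_cpu;
    l->locker_function = fn;
}

static void lock_drop(struct owner_lock *l)
{
    l->locker = -1;
    l->locker_function = "nobody";
}

/* Models the early exit added to sh_page_fault(): if this CPU already
 * holds the lock, print a diagnostic naming the function that took it
 * and decline to handle the fault (return 0). */
static int handle_fault(void)
{
    if (locked_by_me(&shadow_lock_model)) {
        fprintf(stderr, "Recursive shadow fault: lock was taken by %s\n",
                shadow_lock_model.locker_function);
        return 0;
    }
    lock_take(&shadow_lock_model, __func__);
    /* ... fault-handling work would go here ... */
    lock_drop(&shadow_lock_model);
    return 1;
}

int main(void)
{
    handle_fault();                              /* normal path: handled */
    lock_take(&shadow_lock_model, "some_shadow_op");
    handle_fault();                              /* recursive path: diagnostic */
    lock_drop(&shadow_lock_model);
    return 0;
}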
--- a/xen/include/asm-x86/shadow.h      Wed Dec 20 11:39:22 2006 +0000
+++ b/xen/include/asm-x86/shadow.h      Wed Dec 20 11:53:01 2006 +0000
@@ -105,7 +105,7 @@
         (_d)->arch.shadow.locker_function = "nobody";                  \
     } while (0)
 
-#define shadow_lock_is_acquired(_d) \
+#define shadow_locked_by_me(_d) \
     (current->processor == (_d)->arch.shadow.locker)
 
 #define shadow_lock(_d) \
@@ -337,7 +337,7 @@ static inline void mark_dirty(struct dom
 /* Internal version, for when the shadow lock is already held */
 static inline void sh_mark_dirty(struct domain *d, mfn_t gmfn)
 {
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     if ( unlikely(shadow_mode_log_dirty(d)) )
         sh_do_mark_dirty(d, gmfn);
 }
@@ -552,7 +552,7 @@ shadow_remove_all_shadows_and_parents(st
 extern void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all);
 static inline void shadow_remove_all_shadows(struct vcpu *v, mfn_t gmfn)
 {
-    int was_locked = shadow_lock_is_acquired(v->domain);
+    int was_locked = shadow_locked_by_me(v->domain);
     if ( !was_locked )
         shadow_lock(v->domain);
     sh_remove_shadows(v, gmfn, 0, 1);
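The shadow.h hunk defines the renamed predicate: it compares the lock's recorded locker field against the current CPU, so it answers "does this CPU hold the lock" rather than merely "is the lock held", which is exactly what the recursion check needs. It also shows the take-the-lock-only-if-I-don't-already-hold-it idiom in shadow_remove_all_shadows(). Below is a hedged sketch of that idiom using a hypothetical C11-atomics spinlock rather than Xen's real one; the type and helper names are invented for illustration.

/* Sketch only: a stand-in for the shadow lock macros above.  Xen's real
 * lock is a spinlock plus the locker/locker_function fields shown in
 * the diff; everything here is a hypothetical model of that idea. */
#include <stdatomic.h>

struct tracked_lock {
    atomic_flag busy;
    int locker;                   /* CPU id of holder, -1 when free */
    const char *locker_function;
};

#define TRACKED_LOCK_INIT { ATOMIC_FLAG_INIT, -1, "nobody" }

/* Counterpart of shadow_locked_by_me(): true only if *this* CPU holds it. */
static int tracked_locked_by_me(const struct tracked_lock *l, int cpu)
{
    return l->locker == cpu;
}

static void tracked_lock_acquire(struct tracked_lock *l, int cpu, const char *fn)
{
    while (atomic_flag_test_and_set_explicit(&l->busy, memory_order_acquire))
        ;                         /* spin until the lock is free */
    l->locker = cpu;
    l->locker_function = fn;
}

static void tracked_lock_release(struct tracked_lock *l)
{
    l->locker = -1;
    l->locker_function = "nobody";
    atomic_flag_clear_explicit(&l->busy, memory_order_release);
}

/* Mirrors shadow_remove_all_shadows(): callers may or may not already hold
 * the lock, so take it only if we don't, and drop it only if we took it. */
static void remove_all_shadows(struct tracked_lock *l, int cpu)
{
    int was_locked = tracked_locked_by_me(l, cpu);
    if (!was_locked)
        tracked_lock_acquire(l, cpu, __func__);
    /* ... the real code calls sh_remove_shadows() here ... */
    if (!was_locked)
        tracked_lock_release(l);
}

The conditional acquire/release keeps the helper usable both from paths that already hold the shadow lock and from ones that do not, which is why the diff keeps the was_locked flag rather than unconditionally locking.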