struct domain *d = v->domain;
struct page_info *pg = mfn_to_page(gmfn);
+#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
+ struct vcpu *curr = current;
+#endif
ASSERT(paging_locked_by_me(d));
}
#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
- if ( v == current )
+ if ( curr->domain == d )
{
unsigned long gfn;
/* Heuristic: there is likely to be only one writeable mapping,
* in the guest's linear map (on non-HIGHPTE linux and windows)*/
#define GUESS(_a, _h) do { \
- if ( v->arch.paging.mode->shadow.guess_wrmap(v, (_a), gmfn) ) \
+ if ( curr->arch.paging.mode->shadow.guess_wrmap( \
+ curr, (_a), gmfn) ) \
perfc_incr(shadow_writeable_h_ ## _h); \
if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 ) \
{ \
} \
} while (0)
- if ( v->arch.paging.mode->guest_levels == 2 )
+ if ( curr->arch.paging.mode->guest_levels == 2 )
{
if ( level == 1 )
/* 32bit non-PAE w2k3: linear map at 0xC0000000 */
GUESS(0xBFC00000UL + ((fault_addr & VADDR_MASK) >> 10), 6);
}
- else if ( v->arch.paging.mode->guest_levels == 3 )
+ else if ( curr->arch.paging.mode->guest_levels == 3 )
{
/* 32bit PAE w2k3: linear map at 0xC0000000 */
switch ( level )
+ ((fault_addr & VADDR_MASK) >> 18), 6); break;
}
}
- else if ( v->arch.paging.mode->guest_levels == 4 )
+ else if ( curr->arch.paging.mode->guest_levels == 4 )
{
/* 64bit w2k3: linear map at 0xfffff68000000000 */
switch ( level )
* the writeable mapping by looking at the same MFN where the last
* brute-force search succeeded. */
- if ( v->arch.paging.shadow.last_writeable_pte_smfn != 0 )
+ if ( (curr->domain == d) &&
+ (curr->arch.paging.shadow.last_writeable_pte_smfn != 0) )
{
unsigned long old_count = (pg->u.inuse.type_info & PGT_count_mask);
- mfn_t last_smfn = _mfn(v->arch.paging.shadow.last_writeable_pte_smfn);
+ mfn_t last_smfn = _mfn(curr->arch.paging.shadow.last_writeable_pte_smfn);
int shtype = mfn_to_page(last_smfn)->u.sh.type;
if ( callbacks[shtype] )
- callbacks[shtype](v, last_smfn, gmfn);
+ callbacks[shtype](curr, last_smfn, gmfn);
if ( (pg->u.inuse.type_info & PGT_count_mask) != old_count )
perfc_incr(shadow_writeable_h_5);
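
Note (illustrative, not part of the patch): the shift constants in the GUESS()
calls above come from the guest's linear-pagetable layout. With 4-byte PTEs
(2-level guests) one entry covers 4 KiB, so the entry for a virtual address va
lies roughly (va >> 12) * 4 bytes into the linear map, which the code writes as
va >> 10; with 8-byte PAE/64-bit entries the factor is 8 (va >> 9), and an L2
entry covering 2 MiB sits at (va >> 21) * 8, i.e. va >> 18. A standalone sketch
of that arithmetic, using example base and fault addresses:

    /* Illustrative only -- plain C, not Xen code; addresses are examples. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long base = 0xBFC00000UL;   /* example linear-map base   */
        unsigned long va   = 0x00401000UL;   /* example faulting address  */

        /* 2-level guest, 4-byte PTEs: (va >> 12) * 4  ~  va >> 10 */
        unsigned long l1_2level = base + (va >> 10);

        /* PAE/64-bit L1, 8-byte PTEs: (va >> 12) * 8  ~  va >> 9  */
        unsigned long l1_pae    = base + (va >> 9);

        /* PAE L2 entry, one per 2 MiB: (va >> 21) * 8  ~  va >> 18 */
        unsigned long l2_pae    = base + (va >> 18);

        printf("guessed PTE locations: %#lx %#lx %#lx\n",
               l1_2level, l1_pae, l2_pae);
        return 0;
    }
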
int sh_rm_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
mfn_t smfn, unsigned long off)
{
+ struct domain *d = v->domain;
+ struct vcpu *curr = current;
int r;
shadow_l1e_t *sl1p, sl1e;
struct page_info *sp;
ASSERT(mfn_valid(smfn));
/* Remember if we've been told that this process is being torn down */
- v->arch.paging.shadow.pagetable_dying
- = !!(mfn_to_page(gmfn)->shadow_flags & SHF_pagetable_dying);
-
+ if ( curr->domain == d )
+ curr->arch.paging.shadow.pagetable_dying
+ = !!(mfn_to_page(gmfn)->shadow_flags & SHF_pagetable_dying);
sp = mfn_to_page(smfn);
int done = 0;
int flags;
#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
+ struct domain *d = v->domain;
+ struct vcpu *curr = current;
mfn_t base_sl1mfn = sl1mfn; /* Because sl1mfn changes in the foreach */
#endif
(void) shadow_set_l1e(v, sl1e, ro_sl1e, p2m_ram_rw, sl1mfn);
#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
/* Remember the last shadow that we shot a writeable mapping in */
- v->arch.paging.shadow.last_writeable_pte_smfn = mfn_x(base_sl1mfn);
+ if ( curr->domain == d )
+ curr->arch.paging.shadow.last_writeable_pte_smfn = mfn_x(base_sl1mfn);
#endif
if ( (mfn_to_page(readonly_mfn)->u.inuse.type_info
& PGT_count_mask) == 0 )
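
Note (illustrative, not part of the patch): every hunk above applies the same
transformation -- per-vCPU heuristic state (last_writeable_pte_smfn,
pagetable_dying) and the paging-mode hooks are reached through the running
vCPU (curr = current) rather than the passed-in v, and are only touched when
curr actually belongs to the domain d whose page is being processed. A minimal
sketch of that guard pattern, using hypothetical stand-in types rather than
Xen's real structures:

    /* Illustrative only -- hypothetical types, not the Xen API. */
    struct domain { int id; };
    struct vcpu   { struct domain *domain;
                    unsigned long last_writeable_pte_smfn; };

    /* Record per-vCPU heuristic state only when the running vCPU belongs
     * to the domain whose page tables are being modified. */
    static void record_last_writeable(struct vcpu *curr, struct domain *d,
                                      unsigned long smfn)
    {
        if ( curr->domain == d )
            curr->last_writeable_pte_smfn = smfn;
    }
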