int i;
for ( i = 0; i < SHADOW_OOS_PAGES; i++ )
+ {
v->arch.paging.shadow.oos[i] = _mfn(INVALID_MFN);
+ v->arch.paging.shadow.oos_snapshot[i] = _mfn(INVALID_MFN);
+ }
#endif
v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
#endif
/* Update the shadow, but keep the page out of sync. */
-static inline void _sh_resync_l1(struct vcpu *v, mfn_t gmfn)
+static inline void _sh_resync_l1(struct vcpu *v, mfn_t gmfn, mfn_t snpmfn)
{
struct page_info *pg = mfn_to_page(gmfn);
/* Call out to the appropriate per-mode resyncing function */
if ( pg->shadow_flags & SHF_L1_32 )
- SHADOW_INTERNAL_NAME(sh_resync_l1, 2)(v, gmfn);
+ SHADOW_INTERNAL_NAME(sh_resync_l1, 2)(v, gmfn, snpmfn);
else if ( pg->shadow_flags & SHF_L1_PAE )
- SHADOW_INTERNAL_NAME(sh_resync_l1, 3)(v, gmfn);
+ SHADOW_INTERNAL_NAME(sh_resync_l1, 3)(v, gmfn, snpmfn);
#if CONFIG_PAGING_LEVELS >= 4
else if ( pg->shadow_flags & SHF_L1_64 )
- SHADOW_INTERNAL_NAME(sh_resync_l1, 4)(v, gmfn);
+ SHADOW_INTERNAL_NAME(sh_resync_l1, 4)(v, gmfn, snpmfn);
#endif
}
/* Pull all the entries on an out-of-sync page back into sync. */
-static void _sh_resync(struct vcpu *v, mfn_t gmfn, unsigned long va)
+static void _sh_resync(struct vcpu *v, mfn_t gmfn, unsigned long va, mfn_t snp)
{
struct page_info *pg = mfn_to_page(gmfn);
pg->shadow_flags &= ~SHF_oos_may_write;
/* Update the shadows with current guest entries. */
- _sh_resync_l1(v, gmfn);
+ _sh_resync_l1(v, gmfn, snp);
/* Now we know all the entries are synced, and will stay that way */
pg->shadow_flags &= ~SHF_out_of_sync;
/* Add an MFN to the list of out-of-sync guest pagetables */
static void oos_hash_add(struct vcpu *v, mfn_t gmfn, unsigned long va)
{
- int idx;
+ int idx, oidx, swap = 0;
+ void *gptr, *gsnpptr;
mfn_t *oos = v->arch.paging.shadow.oos;
unsigned long *oos_va = v->arch.paging.shadow.oos_va;
+ mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
+ oidx = idx;
+
if ( mfn_valid(oos[idx])
&& (mfn_x(oos[idx]) % SHADOW_OOS_PAGES) == idx )
{
/* Punt the current occupant into the next slot */
SWAP(oos[idx], gmfn);
SWAP(oos_va[idx], va);
+ swap = 1;
idx = (idx + 1) % SHADOW_OOS_PAGES;
}
if ( mfn_valid(oos[idx]) )
{
/* Crush the current occupant. */
- _sh_resync(v, oos[idx], oos_va[idx]);
+ _sh_resync(v, oos[idx], oos_va[idx], oos_snapshot[idx]);
perfc_incr(shadow_unsync_evict);
}
oos[idx] = gmfn;
oos_va[idx] = va;
+
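+ /* If we punted the previous occupant into the next slot, its snapshot must move with it. */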
+ if ( swap )
+ SWAP(oos_snapshot[idx], oos_snapshot[oidx]);
+
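+ /* Take a full copy of the newly-added guest page; resync uses this snapshot to spot which entries have changed. */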
+ gptr = sh_map_domain_page(oos[oidx]);
+ gsnpptr = sh_map_domain_page(oos_snapshot[oidx]);
+ memcpy(gsnpptr, gptr, PAGE_SIZE);
+ sh_unmap_domain_page(gptr);
+ sh_unmap_domain_page(gsnpptr);
}
/* Remove an MFN from the list of out-of-sync guest pagetables */
BUG();
}
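+/* Find the snapshot page for an out-of-sync gmfn by scanning the OOS tables of every vcpu in the domain. */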
+mfn_t oos_snapshot_lookup(struct vcpu *v, mfn_t gmfn)
+{
+ int idx;
+ mfn_t *oos;
+ mfn_t *oos_snapshot;
+ struct domain *d = v->domain;
+
+ for_each_vcpu(d, v)
+ {
+ oos = v->arch.paging.shadow.oos;
+ oos_snapshot = v->arch.paging.shadow.oos_snapshot;
+ idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
+ if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
+ idx = (idx + 1) % SHADOW_OOS_PAGES;
+ if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
+ {
+ return oos_snapshot[idx];
+ }
+ }
+
+ SHADOW_ERROR("gmfn %lx was OOS but not in hash table\n", mfn_x(gmfn));
+ BUG();
+ return _mfn(INVALID_MFN);
+}
+
/* Pull a single guest page back into sync */
void sh_resync(struct vcpu *v, mfn_t gmfn)
{
int idx;
mfn_t *oos;
unsigned long *oos_va;
+ mfn_t *oos_snapshot;
struct domain *d = v->domain;
for_each_vcpu(d, v)
{
oos = v->arch.paging.shadow.oos;
oos_va = v->arch.paging.shadow.oos_va;
+ oos_snapshot = v->arch.paging.shadow.oos_snapshot;
idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
idx = (idx + 1) % SHADOW_OOS_PAGES;
if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
{
- _sh_resync(v, gmfn, oos_va[idx]);
+ _sh_resync(v, gmfn, oos_va[idx], oos_snapshot[idx]);
oos[idx] = _mfn(INVALID_MFN);
return;
}
struct vcpu *other;
mfn_t *oos = v->arch.paging.shadow.oos;
unsigned long *oos_va = v->arch.paging.shadow.oos_va;
+ mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
SHADOW_PRINTK("d=%d, v=%d\n", v->domain->domain_id, v->vcpu_id);
if ( mfn_valid(oos[idx]) )
{
/* Write-protect and sync contents */
- _sh_resync(v, oos[idx], oos_va[idx]);
+ _sh_resync(v, oos[idx], oos_va[idx], oos_snapshot[idx]);
oos[idx] = _mfn(INVALID_MFN);
}
oos = other->arch.paging.shadow.oos;
oos_va = other->arch.paging.shadow.oos_va;
-
+ oos_snapshot = other->arch.paging.shadow.oos_snapshot;
for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
{
if ( !mfn_valid(oos[idx]) )
/* Update the shadows and leave the page OOS. */
if ( sh_skip_sync(v, oos[idx]) )
continue;
- _sh_resync_l1(other, oos[idx]);
+ _sh_resync_l1(other, oos[idx], oos_snapshot[idx]);
}
else
{
/* Write-protect and sync contents */
- _sh_resync(other, oos[idx], oos_va[idx]);
+ _sh_resync(other, oos[idx], oos_va[idx], oos_snapshot[idx]);
oos[idx] = _mfn(INVALID_MFN);
}
}
0, /* SH_type_l3_64_shadow */
0, /* SH_type_l4_64_shadow */
2, /* SH_type_p2m_table */
- 0 /* SH_type_monitor_table */
+ 0, /* SH_type_monitor_table */
+ 0 /* SH_type_oos_snapshot */
};
ASSERT(shadow_type < SH_type_unused);
return type_to_order[shadow_type];
for ( i = 0; i < SHADOW_OOS_FT_HASH * SHADOW_OOS_FT_ENTRIES; i++ )
v->arch.paging.shadow.oos_fixups[i].gmfn = _mfn(INVALID_MFN);
}
+
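+ /* Allocate one snapshot page per OOS slot if this vcpu does not have them yet. */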
+ if ( mfn_x(v->arch.paging.shadow.oos_snapshot[0]) == INVALID_MFN )
+ {
+ int i;
+ for ( i = 0; i < SHADOW_OOS_PAGES; i++ )
+ {
+ shadow_prealloc(d, SH_type_oos_snapshot, 1);
+ v->arch.paging.shadow.oos_snapshot[i] =
+ shadow_alloc(d, SH_type_oos_snapshot, 0);
+ }
+ }
#endif /* OOS */
// Valid transitions handled by this function:
free_xenheap_pages(v->arch.paging.shadow.oos_fixups,
SHADOW_OOS_FT_ORDER);
}
+
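+ /* Free this vcpu's OOS snapshot pages. */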
+ {
+ int i;
+ mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
+ for ( i = 0; i < SHADOW_OOS_PAGES; i++ )
+ if ( mfn_valid(oos_snapshot[i]) )
+ shadow_free(d, oos_snapshot[i]);
+ }
#endif /* OOS */
}
#endif /* (SHADOW_OPTIMIZATIONS & (SHOPT_VIRTUAL_TLB|SHOPT_OUT_OF_SYNC)) */
mfn_t gmfn;
p2m_type_t p2mt;
int result = 0;
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+ mfn_t gl1mfn;
+#endif /* OOS */
perfc_incr(shadow_validate_gl1e_calls);
gmfn = gfn_to_mfn(v->domain, gfn, &p2mt);
l1e_propagate_from_guest(v, new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt);
-
result |= shadow_set_l1e(v, sl1p, new_sl1e, sl1mfn);
+
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
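+ /* The snapshot must also see this write, so a later resync does not treat it as a fresh change. */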
+ gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->backpointer);
+ if ( mfn_valid(gl1mfn)
+ && mfn_is_out_of_sync(gl1mfn) )
+ {
+ /* Update the OOS snapshot. */
+ mfn_t snpmfn = oos_snapshot_lookup(v, gl1mfn);
+ guest_l1e_t *snp;
+
+ ASSERT(mfn_valid(snpmfn));
+
+ snp = sh_map_domain_page(snpmfn);
+ snp[guest_index(new_ge)] = new_gl1e;
+ sh_unmap_domain_page(snp);
+ }
+#endif /* OOS */
+
return result;
}
* revalidates the guest entry that corresponds to it.
* N.B. This function is called with the vcpu that unsynced the page,
* *not* the one that is causing it to be resynced. */
-void sh_resync_l1(struct vcpu *v, mfn_t gmfn)
+void sh_resync_l1(struct vcpu *v, mfn_t gl1mfn, mfn_t snpmfn)
{
mfn_t sl1mfn;
shadow_l1e_t *sl1p;
- guest_l1e_t *gl1p, *gp;
+ guest_l1e_t *gl1p, *gp, *snp;
int rc = 0;
- sl1mfn = get_shadow_status(v, gmfn, SH_type_l1_shadow);
+ ASSERT(mfn_valid(snpmfn));
+
+ sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
ASSERT(mfn_valid(sl1mfn)); /* Otherwise we would not have been called */
- gp = sh_map_domain_page(gmfn);
+ snp = sh_map_domain_page(snpmfn);
+ gp = sh_map_domain_page(gl1mfn);
gl1p = gp;
- SHADOW_FOREACH_L1E(sl1mfn, sl1p, &gl1p, 0, {
- rc |= validate_gl1e(v, gl1p, sl1mfn, sl1p);
+ SHADOW_FOREACH_L1E(sl1mfn, sl1p, &gl1p, 0, {
+ guest_l1e_t gl1e = *gl1p;
+ guest_l1e_t *snpl1p = (guest_l1e_t *)snp + guest_index(gl1p);
+
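+ /* Only entries that differ from the snapshot need to be re-propagated. */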
+ if ( memcmp(snpl1p, &gl1e, sizeof(gl1e)) )
+ {
+ gfn_t gfn;
+ mfn_t gmfn;
+ p2m_type_t p2mt;
+ shadow_l1e_t nsl1e;
+
+ gfn = guest_l1e_get_gfn(gl1e);
+ gmfn = gfn_to_mfn(v->domain, gfn, &p2mt);
+ l1e_propagate_from_guest(v, gl1e, gmfn, &nsl1e, ft_prefetch, p2mt);
+ rc |= shadow_set_l1e(v, sl1p, nsl1e, sl1mfn);
+
+ *snpl1p = gl1e;
+ }
});
sh_unmap_domain_page(gp);
+ sh_unmap_domain_page(snp);
/* Setting shadow L1 entries should never need us to flush the TLB */
ASSERT(!(rc & SHADOW_SET_FLUSH));
shadow_l1e_t sl1e;
u32 gflags;
p2m_type_t p2mt;
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+ guest_l1e_t *snpl1p = NULL;
+#endif /* OOS */
+
/* Prefetch no further than the end of the _shadow_ l1 MFN */
dist = (PAGE_SIZE - ((unsigned long)ptr_sl1e & ~PAGE_MASK)) / sizeof sl1e;
/* Normal guest page; grab the next guest entry */
gl1p = sh_map_domain_page(gw->l1mfn);
gl1p += guest_l1_table_offset(gw->va);
+
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
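+ /* If the guest L1 is out of sync, map its snapshot so the prefetch loop can mirror entries into it. */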
+ if ( mfn_is_out_of_sync(gw->l1mfn) )
+ {
+ mfn_t snpmfn = oos_snapshot_lookup(v, gw->l1mfn);
+
+ ASSERT(mfn_valid(snpmfn));
+ snpl1p = sh_map_domain_page(snpmfn);
+ snpl1p += guest_l1_table_offset(gw->va);
+ }
+#endif /* OOS */
}
for ( i = 1; i < dist ; i++ )
/* Propagate the entry. */
l1e_propagate_from_guest(v, gl1e, gmfn, &sl1e, ft_prefetch, p2mt);
(void) shadow_set_l1e(v, ptr_sl1e + i, sl1e, sl1mfn);
+
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+ if ( snpl1p != NULL )
+ snpl1p[i] = gl1e;
+#endif /* OOS */
}
if ( gl1p != NULL )
sh_unmap_domain_page(gl1p);
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+ if ( snpl1p != NULL )
+ sh_unmap_domain_page(snpl1p);
+#endif /* OOS */
}
#endif /* SHADOW_OPTIMIZATIONS & SHOPT_PREFETCH */
l1e_propagate_from_guest(v, gw.l1e, gmfn, &sl1e, ft, p2mt);
r = shadow_set_l1e(v, ptr_sl1e, sl1e, sl1mfn);
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+ if ( mfn_valid(gw.l1mfn)
+ && mfn_is_out_of_sync(gw.l1mfn) )
+ {
+ /* Update the OOS snapshot. */
+ mfn_t snpmfn = oos_snapshot_lookup(v, gw.l1mfn);
+ guest_l1e_t *snp;
+
+ ASSERT(mfn_valid(snpmfn));
+
+ snp = sh_map_domain_page(snpmfn);
+ snp[guest_l1_table_offset(va)] = gw.l1e;
+ sh_unmap_domain_page(snp);
+ }
+#endif /* OOS */
+
#if SHADOW_OPTIMIZATIONS & SHOPT_PREFETCH
/* Prefetch some more shadow entries */
sh_prefetch(v, &gw, ptr_sl1e, sl1mfn);