    /* Is the page already shadowed? */
    if ( !test_and_set_bit(_PGC_page_table, &page->count_info) )
+    {
        page->shadow_flags = 0;
+        if ( is_hvm_domain(d) )
+            page->pagetable_dying = false;
+    }

-    ASSERT(!test_bit(type, &page->shadow_flags));
-    set_bit(type, &page->shadow_flags);
+    ASSERT(!(page->shadow_flags & (1u << type)));
+    page->shadow_flags |= 1u << type;
    TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_PROMOTE);
}
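The switch from test_bit()/set_bit() to plain bitmask arithmetic here (and in shadow_demote below) is deliberate: Xen's generic bitops act on long-sized words, so applying them to a 16-bit field would read and write bytes beyond it, including the new pagetable_dying byte. Plain read-modify-write is safe because shadow_flags is only ever updated under the shadow lock. The following is a minimal, self-contained userspace sketch of the promote/demote bookkeeping, illustrative only; the SHF_page_type_mask value and the struct shape are assumptions for the sketch, not Xen's definitions.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SHF_page_type_mask 0x7fffu  /* assumed value, sketch only */

    struct page_meta {
        uint16_t shadow_flags;          /* one bit per shadow type */
    };

    /* Plain read-modify-write: safe under a lock, and it never touches
     * the byte(s) following the 16-bit field, unlike long-based bitops. */
    static void promote(struct page_meta *pg, unsigned int type)
    {
        assert(!(pg->shadow_flags & (1u << type)));
        pg->shadow_flags |= 1u << type;
    }

    static void demote(struct page_meta *pg, unsigned int type)
    {
        assert(pg->shadow_flags & (1u << type));
        pg->shadow_flags &= ~(1u << type);
    }

    int main(void)
    {
        struct page_meta pg = { .shadow_flags = 0 };

        promote(&pg, 3);
        promote(&pg, 5);
        demote(&pg, 3);
        printf("shadow_flags=%04x, last shadow gone: %d\n", pg.shadow_flags,
               (pg.shadow_flags & SHF_page_type_mask) == 0);
        return 0;
    }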
    struct page_info *page = mfn_to_page(gmfn);

    ASSERT(test_bit(_PGC_page_table, &page->count_info));
-    ASSERT(test_bit(type, &page->shadow_flags));
+    ASSERT(page->shadow_flags & (1u << type));

-    clear_bit(type, &page->shadow_flags);
+    page->shadow_flags &= ~(1u << type);

    if ( (page->shadow_flags & SHF_page_type_mask) == 0 )
    {
    if ( !fast && all && (pg->count_info & PGC_page_table) )
    {
        printk(XENLOG_G_ERR "can't find all shadows of mfn %"PRI_mfn
-               " (shadow_flags=%08x)\n", mfn_x(gmfn), pg->shadow_flags);
+               " (shadow_flags=%04x)\n", mfn_x(gmfn), pg->shadow_flags);
        domain_crash(d);
    }
    /* Unshadow if we are writing to a toplevel pagetable that is
     * flagged as a dying process, and that is not currently used. */
-    if ( sh_mfn_is_a_page_table(gmfn)
-         && (mfn_to_page(gmfn)->shadow_flags & SHF_pagetable_dying) )
+    if ( sh_mfn_is_a_page_table(gmfn) && is_hvm_domain(d) &&
+         mfn_to_page(gmfn)->pagetable_dying )
    {
        int used = 0;
        struct vcpu *tmp;
    ASSERT(mfn_valid(smfn));

    /* Remember if we've been told that this process is being torn down */
-    if ( curr->domain == d )
+    if ( curr->domain == d && is_hvm_domain(d) )
        curr->arch.paging.shadow.pagetable_dying
-            = !!(mfn_to_page(gmfn)->shadow_flags & SHF_pagetable_dying);
+            = mfn_to_page(gmfn)->pagetable_dying;
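Both hunks above gate on is_hvm_domain(d) before touching pagetable_dying, since the field is only meaningful for HVM guests; for PV guests the same bytes serve other purposes. Below is a hypothetical, simplified sketch of the resulting fast-unshadow decision; the types and the should_fast_unshadow() helper are illustrative stand-ins, not Xen's API.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct page_meta {
        uint16_t shadow_flags;
        bool pagetable_dying;              /* meaningful for HVM only */
    };

    struct vcpu_stub {
        const struct page_meta *toplevel;  /* shadow root currently in use */
    };

    /* Unshadow eagerly only if the domain is HVM (PV never sets the field),
     * the guest flagged this toplevel as dying, and no vCPU still uses it. */
    static bool should_fast_unshadow(const struct page_meta *pg, bool is_hvm,
                                     const struct vcpu_stub *vcpus, size_t n)
    {
        if ( !is_hvm || !pg->pagetable_dying )
            return false;
        for ( size_t i = 0; i < n; i++ )
            if ( vcpus[i].toplevel == pg )
                return false;              /* still in use: keep the shadow */
        return true;
    }

    int main(void)
    {
        struct page_meta pt = { .shadow_flags = 0, .pagetable_dying = true };
        struct vcpu_stub vcpus[2] = { { NULL }, { NULL } };

        return should_fast_unshadow(&pt, true, vcpus, 2) ? 0 : 1;
    }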
    sp = mfn_to_page(smfn);

                   : shadow_hash_lookup(d, mfn_x(gmfn), SH_type_l2_pae_shadow);
        }

-        if ( mfn_valid(smfn) )
+        if ( mfn_valid(smfn) && is_hvm_domain(d) )
        {
            gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
-            mfn_to_page(gmfn)->shadow_flags |= SHF_pagetable_dying;
+            mfn_to_page(gmfn)->pagetable_dying = true;
            shadow_unhook_mappings(d, smfn, 1/* user pages only */);
            flush = 1;
        }
    smfn = shadow_hash_lookup(d, mfn_x(gmfn), SH_type_l4_64_shadow);
#endif

-    if ( mfn_valid(smfn) )
+    if ( mfn_valid(smfn) && is_hvm_domain(d) )
    {
-        mfn_to_page(gmfn)->shadow_flags |= SHF_pagetable_dying;
+        mfn_to_page(gmfn)->pagetable_dying = true;
        shadow_unhook_mappings(d, smfn, 1/* user pages only */);

        /* Now flush the TLB: we removed toplevel mappings. */
        flush_tlb_mask(d->dirty_cpumask);
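In both fragments the sequence is the same: mark the backing guest page via the new boolean, unhook only the user-mode mappings from the toplevel shadow (the kernel half stays hooked so the guest can finish tearing the process down), then flush the TLB, deferred through the flush flag in the PAE loop above, issued directly in the 4-level case here. A self-contained sketch of the deferred variant follows, using assumed stand-in types rather than Xen's:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_SLOTS 4                    /* e.g. the four PAE l3 slots */

    struct shadow_stub {
        bool valid;                       /* a toplevel shadow was found */
        unsigned int user_entries;        /* stand-in for user-mode mappings */
    };

    /* Drop user-mode entries only; kernel-half entries stay hooked. */
    static void unhook_user_mappings(struct shadow_stub *s)
    {
        s->user_entries = 0;
    }

    int main(void)
    {
        struct shadow_stub slots[NR_SLOTS] = {
            { true, 12 }, { false, 0 }, { true, 3 }, { true, 0 },
        };
        bool flush = false;

        for ( int i = 0; i < NR_SLOTS; i++ )
            if ( slots[i].valid )
            {
                unhook_user_mappings(&slots[i]);
                flush = true;             /* defer: one flush covers all slots */
            }

        if ( flush )
            printf("single TLB flush after all unhooks\n");
        return 0;
    }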
#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */

-#define SHF_pagetable_dying (1u<<31)
-
static inline int sh_page_has_multiple_shadows(struct page_info *pg)
{
    u32 shadows;
         * Guest pages with a shadow. This does not conflict with
         * tlbflush_timestamp since page table pages are explicitly not
         * tracked for TLB-flush avoidance when a guest runs in shadow mode.
+         *
+         * pagetable_dying is used for HVM domains only. The layout here has
+         * to avoid re-use of the space used by linear_pt_count, which (only)
+         * PV guests use.
         */
-        u32 shadow_flags;
+        struct {
+            uint16_t shadow_flags;
+            bool pagetable_dying;
+        };

        /* When in use as a shadow, next shadow in this hash chain. */
        __pdx_t next_shadow;
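This struct change is the heart of the fix: the old 32-bit shadow_flags shared union space with data that PV guests use (linear_pt_count), so shadow-mode updates made on behalf of a PV guest under migration or log-dirty could corrupt it. The sketch below demonstrates the aliasing hazard the new comment warns about; the layout is illustrative only (little-endian assumed) and is not Xen's actual struct page_info.

    #include <stdint.h>
    #include <stdio.h>

    union hazard {
        uint32_t shadow_flags;    /* old layout: 32-bit shadow flags */
        int16_t linear_pt_count;  /* PV-only counter in the same bytes */
    };

    int main(void)
    {
        union hazard h = { .linear_pt_count = 5 };

        /* Shadow code setting a type bit tramples the PV counter. */
        h.shadow_flags |= 1u << 3;
        printf("linear_pt_count: %d (was 5)\n", h.linear_pt_count);
        return 0;
    }

Shrinking shadow_flags to 16 bits and keeping the HVM-only pagetable_dying byte out of the space PV guests use removes this overlap by construction.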