x86/shadow: shrink struct page_info's shadow_flags to 16 bits

This is to avoid it overlapping the linear_pt_count field needed for PV
domains. Introduce a separate, HVM-only pagetable_dying field to replace
the sole one left in the upper 16 bits.
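
For reference, a condensed sketch of the union that shadow_flags lives in
(modeled on struct page_info in xen/include/asm-x86/mm.h; the PV bitfield
details are simplified and the name nr_validated_ptes_etc is invented for
illustration):

    union {
        /* PV linear pagetable bookkeeping (XSA-240); only PV guests use
         * this.  linear_pt_count occupies the second 16 bits of the word. */
        struct {
            uint16_t nr_validated_ptes_etc; /* really several bitfields */
            int16_t  linear_pt_count;
        };

        /* Shadow bookkeeping.  As a u32, shadow_flags aliased
         * linear_pt_count.  Shrunk to 16 bits it no longer does; the new
         * pagetable_dying byte does still share space with linear_pt_count,
         * which is safe only because pagetable_dying is HVM-only while
         * linear_pt_count is PV-only. */
        struct {
            uint16_t shadow_flags;
            bool_t   pagetable_dying;
        };
    };
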
Note that the accesses to ->shadow_flags in shadow_{pro,de}mote() get
switched to non-atomic, non-bitops operations, as {test,set,clear}_bit()
are not allowed on uint16_t fields and hence their use would have
required ugly casts. This is fine because all updates of the field ought
to occur with the paging lock held, and other updates of it use |= and
&= as well (i.e. using atomic operations here didn't really guard
against potentially racing updates elsewhere).
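
To illustrate (a minimal standalone sketch, not the patch itself; the
struct and function names are invented), the plain read-modify-write
pattern that replaces the bitops, with the paging lock assumed held by
all callers and all flag bits fitting in 16 bits (arranged by the first
XSA-280 patch):

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in for the relevant part of struct page_info. */
    struct page_info_sketch {
        uint16_t shadow_flags;
    };

    /* Safe without atomics: every writer holds the paging lock. */
    void promote_type(struct page_info_sketch *pg, unsigned int type)
    {
        assert(!(pg->shadow_flags & (1u << type))); /* was: test_bit()  */
        pg->shadow_flags |= 1u << type;             /* was: set_bit()   */
    }

    void demote_type(struct page_info_sketch *pg, unsigned int type)
    {
        assert(pg->shadow_flags & (1u << type));    /* was: test_bit()  */
        pg->shadow_flags &= ~(1u << type);          /* was: clear_bit() */
    }
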

This is part of XSA-280.

Reported-by: Prgmr.com Security <security@prgmr.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
master commit: 789589968ed90e82a832dbc60e958c76b787be7e
master date: 2018-11-20 14:59:54 +0100

     /* Is the page already shadowed? */
     if ( !test_and_set_bit(_PGC_page_table, &page->count_info) )
+    {
         page->shadow_flags = 0;
+        if ( is_hvm_domain(d) )
+            page->pagetable_dying = 0;
+    }
-    ASSERT(!test_bit(type, &page->shadow_flags));
-    set_bit(type, &page->shadow_flags);
+    ASSERT(!(page->shadow_flags & (1u << type)));
+    page->shadow_flags |= 1u << type;
     TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_PROMOTE);
 }

     struct page_info *page = mfn_to_page(gmfn);
     ASSERT(test_bit(_PGC_page_table, &page->count_info));
-    ASSERT(test_bit(type, &page->shadow_flags));
+    ASSERT(page->shadow_flags & (1u << type));
-    clear_bit(type, &page->shadow_flags);
+    page->shadow_flags &= ~(1u << type);
     if ( (page->shadow_flags & SHF_page_type_mask) == 0 )
     {

     if ( !fast && all && (pg->count_info & PGC_page_table) )
     {
         SHADOW_ERROR("can't find all shadows of mfn %05lx "
-                     "(shadow_flags=%08x)\n",
+                     "(shadow_flags=%04x)\n",
                      mfn_x(gmfn), pg->shadow_flags);
         domain_crash(d);
     }

     /* Unshadow if we are writing to a toplevel pagetable that is
      * flagged as a dying process, and that is not currently used. */
-    if ( sh_mfn_is_a_page_table(gmfn)
-         && (mfn_to_page(gmfn)->shadow_flags & SHF_pagetable_dying) )
+    if ( sh_mfn_is_a_page_table(gmfn) && is_hvm_domain(d) &&
+         mfn_to_page(gmfn)->pagetable_dying )
     {
         int used = 0;
         struct vcpu *tmp;

     ASSERT(mfn_valid(smfn));
     /* Remember if we've been told that this process is being torn down */
-    if ( curr->domain == d )
+    if ( curr->domain == d && is_hvm_domain(d) )
         curr->arch.paging.shadow.pagetable_dying
-            = !!(mfn_to_page(gmfn)->shadow_flags & SHF_pagetable_dying);
+            = mfn_to_page(gmfn)->pagetable_dying;
     sp = mfn_to_page(smfn);

             smfn = shadow_hash_lookup(d, mfn_x(gmfn), SH_type_l2_pae_shadow);
         }
-        if ( mfn_valid(smfn) )
+        if ( mfn_valid(smfn) && is_hvm_domain(d) )
         {
             gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
-            mfn_to_page(gmfn)->shadow_flags |= SHF_pagetable_dying;
+            mfn_to_page(gmfn)->pagetable_dying = 1;
             shadow_unhook_mappings(d, smfn, 1/* user pages only */);
             flush = 1;
         }

     smfn = shadow_hash_lookup(d, mfn_x(gmfn), SH_type_l4_64_shadow);
 #endif
-    if ( mfn_valid(smfn) )
+    if ( mfn_valid(smfn) && is_hvm_domain(d) )
     {
-        mfn_to_page(gmfn)->shadow_flags |= SHF_pagetable_dying;
+        mfn_to_page(gmfn)->pagetable_dying = 1;
         shadow_unhook_mappings(d, smfn, 1/* user pages only */);
         /* Now flush the TLB: we removed toplevel mappings. */
         flush_tlb_mask(d->domain_dirty_cpumask);

 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */
-#define SHF_pagetable_dying (1u<<31)
-
 static inline int sh_page_has_multiple_shadows(struct page_info *pg)
 {
     u32 shadows;

          * Guest pages with a shadow. This does not conflict with
          * tlbflush_timestamp since page table pages are explicitly not
          * tracked for TLB-flush avoidance when a guest runs in shadow mode.
+         *
+         * pagetable_dying is used for HVM domains only. The layout here has
+         * to avoid re-use of the space used by linear_pt_count, which (only)
+         * PV guests use.
          */
-        u32 shadow_flags;
+        struct {
+            uint16_t shadow_flags;
+            bool_t pagetable_dying;
+        };
         /* When in use as a shadow, next shadow in this hash chain. */
         __pdx_t next_shadow;
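
The common thread in the hunks above: because pagetable_dying now shares
union space with the PV-only linear_pt_count, it may only be accessed for
HVM domains, so every read and write gains an is_hvm_domain() guard.  A
sketch of that discipline (the helper name is invented, not code from the
patch):

    /* Only meaningful for HVM domains; for PV domains the byte may hold
     * part of linear_pt_count. */
    static inline bool_t sh_pagetable_is_dying(const struct domain *d,
                                               const struct page_info *pg)
    {
        return is_hvm_domain(d) && pg->pagetable_dying;
    }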