From 8b17648339ba801c4c7937b5f13dd25068e54e60 Mon Sep 17 00:00:00 2001
From: Tim Deegan
Date: Mon, 14 Mar 2016 11:05:48 +0000
Subject: [PATCH] x86: limit GFNs to 32 bits for shadowed superpages.

Superpage shadows store the shadowed GFN in the backpointer field,
which for non-BIGMEM builds is 32 bits wide.  Shadowing a superpage
mapping of a guest-physical address above 2^44 would lead to the GFN
being truncated there, and a crash when we come to remove the shadow
from the hash table.

Track the valid width of a GFN for each guest, including reporting it
through CPUID, and enforce it in the shadow pagetables.  Set the
maximum width to 32 for guests where this truncation could occur.

This is XSA-173.

Reported-by: Ling Liu
Signed-off-by: Tim Deegan
Signed-off-by: Jan Beulich
---
 xen/arch/x86/cpu/common.c         |  8 ++++++--
 xen/arch/x86/hvm/hvm.c            |  3 +--
 xen/arch/x86/mm/guest_walk.c      | 20 +++++++-------------
 xen/arch/x86/mm/hap/hap.c         |  2 ++
 xen/arch/x86/mm/p2m.c             |  6 ++++++
 xen/arch/x86/mm/shadow/common.c   | 10 ++++++++++
 xen/arch/x86/mm/shadow/multi.c    |  3 ++-
 xen/include/asm-x86/domain.h      |  3 +++
 xen/include/asm-x86/guest_pt.h    | 18 ++++++++++--------
 xen/include/asm-x86/processor.h   |  2 ++
 xen/include/asm-x86/x86_64/page.h |  6 ++++++
 11 files changed, 55 insertions(+), 26 deletions(-)

diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index fe6eab49cc..f664341203 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -45,6 +45,7 @@ struct cpuidmasks __read_mostly cpuidmask_defaults;
 const struct cpu_dev *__read_mostly cpu_devs[X86_VENDOR_NUM] = {};
 
 unsigned int paddr_bits __read_mostly = 36;
+unsigned int hap_paddr_bits __read_mostly = 36;
 
 /*
  * Default host IA32_CR_PAT value to cover all memory types.
@@ -236,8 +237,11 @@ static void __init early_cpu_detect(void)
 	c->x86_capability[cpufeat_word(X86_FEATURE_FPU)] = edx;
 	c->x86_capability[cpufeat_word(X86_FEATURE_SSE3)] = ecx;
 
-	if ( cpuid_eax(0x80000000) >= 0x80000008 )
-		paddr_bits = cpuid_eax(0x80000008) & 0xff;
+	if ( cpuid_eax(0x80000000) >= 0x80000008 ) {
+		eax = cpuid_eax(0x80000008);
+		paddr_bits = eax & 0xff;
+		hap_paddr_bits = ((eax >> 16) & 0xff) ?: paddr_bits;
+	}
 }
 
 static void generic_identify(struct cpuinfo_x86 *c)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index f24126d8c8..e9d4c6b06c 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3504,8 +3504,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
         break;
 
     case 0x80000008:
-        count = cpuid_eax(0x80000008);
-        count = (count >> 16) & 0xff ?: count & 0xff;
+        count = d->arch.paging.gfn_bits + PAGE_SHIFT;
         if ( (*eax & 0xff) > count )
             *eax = (*eax & ~0xff) | count;
 
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 21cabbab15..625ac4dfcc 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -342,20 +342,8 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
         flags &= ~_PAGE_PAT;
 
         if ( gfn_x(start) & GUEST_L2_GFN_MASK & ~0x1 )
-        {
-#if GUEST_PAGING_LEVELS == 2
-            /*
-             * Note that _PAGE_INVALID_BITS is zero in this case, yielding a
-             * no-op here.
-             *
-             * Architecturally, the walk should fail if bit 21 is set (others
-             * aren't being checked at least in PSE36 mode), but we'll ignore
-             * this here in order to avoid specifying a non-natural, non-zero
-             * _PAGE_INVALID_BITS value just for that case.
-             */
-#endif
             rc |= _PAGE_INVALID_BITS;
-        }
+
         /* Increment the pfn by the right number of 4k pages.
          * Mask out PAT and invalid bits. */
        start = _gfn((gfn_x(start) & ~GUEST_L2_GFN_MASK) +
@@ -441,5 +429,11 @@ set_ad:
         put_page(mfn_to_page(mfn_x(gw->l1mfn)));
     }
 
+    /* If this guest has a restricted physical address space then the
+     * target GFN must fit within it. */
+    if ( !(rc & _PAGE_PRESENT)
+         && gfn_x(guest_l1e_get_gfn(gw->l1e)) >> d->arch.paging.gfn_bits )
+        rc |= _PAGE_INVALID_BITS;
+
     return rc;
 }
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index f173539549..4ab99bb607 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -448,6 +448,8 @@ void hap_domain_init(struct domain *d)
 {
     INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist);
 
+    d->arch.paging.gfn_bits = hap_paddr_bits - PAGE_SHIFT;
+
     /* Use HAP logdirty mechanism. */
     paging_log_dirty_init(d, hap_enable_log_dirty,
                           hap_disable_log_dirty,
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index b3fce1b7ab..9e82256f3d 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2081,6 +2081,12 @@ void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
 {
     struct page_info *page;
 
+    if ( gfn_x(gfn) >> p2m->domain->arch.paging.gfn_bits )
+    {
+        *rc = _PAGE_INVALID_BIT;
+        return NULL;
+    }
+
     /* Translate the gfn, unsharing if shared. */
     page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), p2mt, NULL, q);
     if ( p2m_is_paging(*p2mt) )
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index ec87fb4b51..4e06c1364f 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -51,6 +51,16 @@ int shadow_domain_init(struct domain *d, unsigned int domcr_flags)
     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelist);
     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
 
+    d->arch.paging.gfn_bits = paddr_bits - PAGE_SHIFT;
+#ifndef CONFIG_BIGMEM
+    /*
+     * Shadowed superpages store GFNs in 32-bit page_info fields.
+     * Note that we cannot use guest_supports_superpages() here.
+     */
+    if ( !is_pv_domain(d) || opt_allow_superpage )
+        d->arch.paging.gfn_bits = 32;
+#endif
+
     /* Use shadow pagetables for log-dirty support */
     paging_log_dirty_init(d, sh_enable_log_dirty,
                           sh_disable_log_dirty, sh_clean_dirty_bitmap);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index e5c8499a66..428be374b8 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -540,7 +540,8 @@ _sh_propagate(struct vcpu *v,
     ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);
 
     /* Check there's something for the shadows to map to */
-    if ( !p2m_is_valid(p2mt) && !p2m_is_grant(p2mt) )
+    if ( (!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt))
+         || gfn_x(target_gfn) >> d->arch.paging.gfn_bits )
     {
         *sp = shadow_l1e_empty();
         goto done;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index d393ed24aa..165e533ab3 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -194,6 +194,9 @@ struct paging_domain {
     /* log dirty support */
     struct log_dirty_domain log_dirty;
 
+    /* Number of valid bits in a gfn. */
+    unsigned int gfn_bits;
+
     /* preemption handling */
     struct {
         const struct domain *dom;
diff --git a/xen/include/asm-x86/guest_pt.h b/xen/include/asm-x86/guest_pt.h
index eb29e622f6..1f6c2ae357 100644
--- a/xen/include/asm-x86/guest_pt.h
+++ b/xen/include/asm-x86/guest_pt.h
@@ -222,15 +222,17 @@ guest_supports_nx(struct vcpu *v)
 }
 
 
-/* Some bits are invalid in any pagetable entry. */
-#if GUEST_PAGING_LEVELS == 2
-#define _PAGE_INVALID_BITS (0)
-#elif GUEST_PAGING_LEVELS == 3
-#define _PAGE_INVALID_BITS \
-    get_pte_flags(((1ull<<63) - 1) & ~((1ull<<paddr_bits) - 1))
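
A note for readers outside the Xen tree: the standalone C sketch below
(not part of the patch; the struct and variable names are invented for
illustration) shows the truncation described in the commit message and
the "gfn >> gfn_bits" guard that the guest_walk.c, p2m.c and
shadow/multi.c hunks all rely on.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Stand-in for the 32-bit backpointer field that superpage shadows
 * use in non-BIGMEM builds; the real field lives in struct page_info. */
struct fake_shadow_page {
    uint32_t back;
};

int main(void)
{
    /* A guest-physical address just above 2^44 needs a 33-bit GFN. */
    uint64_t gpa = (1ULL << 44) + (123ULL << PAGE_SHIFT);
    uint64_t gfn = gpa >> PAGE_SHIFT;

    /* Pre-patch failure mode: the GFN is silently truncated on store,
     * so a later hash lookup by the full GFN cannot find the shadow. */
    struct fake_shadow_page sp;
    sp.back = (uint32_t)gfn;
    printf("stored GFN %#" PRIx64 ", recovered %#" PRIx32 "\n",
           gfn, sp.back);

    /* The patch's guard: with d->arch.paging.gfn_bits == 32, any GFN
     * with bits set at or above bit 32 is rejected before a shadow is
     * ever created. */
    unsigned int gfn_bits = 32;
    if ( gfn >> gfn_bits )
        puts("GFN too wide for this guest: mapping treated as invalid");

    return 0;
}

Compiled with a C99 compiler, this prints a stored/recovered mismatch
(0x10000007b vs 0x7b), which is exactly the condition the hash-table
removal crash depended on and that the new per-domain gfn_bits check
prevents.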