From 94e709da726488b4541424d34ec5bc6f33fce646 Mon Sep 17 00:00:00 2001
From: Keir Fraser
Date: Wed, 31 Oct 2007 10:07:42 +0000
Subject: [PATCH] hvm: Fail attempts to add pages to guest pseudophys memory
 map above 4GB when running with AMD NPT on PAE host.

From: Wei Huang

Signed-off-by: Keir Fraser
---
 xen/arch/x86/hvm/hvm.c    |  7 ++++---
 xen/arch/x86/mm/p2m.c     | 38 +++++++++++++++++++++++++++-----------
 xen/common/memory.c       |  6 ++++--
 xen/include/asm-x86/p2m.h | 13 ++++++++-----
 4 files changed, 43 insertions(+), 21 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index dcdcceb841..89a90f8d73 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -53,10 +53,11 @@
 /*
  * Xen command-line option to allow/disallow hardware-assisted paging.
  * Since the phys-to-machine table of AMD NPT is in host format, 32-bit Xen
- * could only support guests using NPT with up to a 4GB memory map. Therefore
- * we only allow HAP by default on 64-bit Xen.
+ * can only support guests using NPT with up to a 4GB memory map. Therefore
+ * we disallow HAP by default on PAE Xen (by default we want to support an
+ * 8GB pseudophysical memory map for HVM guests on a PAE host).
  */
-static int opt_hap_permitted = (BITS_PER_LONG == 8);
+static int opt_hap_permitted = (CONFIG_PAGING_LEVELS != 3);
 boolean_param("hap", opt_hap_permitted);
 
 int hvm_enabled __read_mostly;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 0dfdaaceb5..ae158fdac5 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -219,15 +219,17 @@ set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, p2m_type_t p2mt)
         goto out;
 #endif
 #if CONFIG_PAGING_LEVELS >= 3
-    // When using PAE Xen, we only allow 33 bits of pseudo-physical
-    // address in translated guests (i.e. 8 GBytes). This restriction
-    // comes from wanting to map the P2M table into the 16MB RO_MPT hole
-    // in Xen's address space for translated PV guests.
-    //
+    /*
+     * When using PAE Xen, we only allow 33 bits of pseudo-physical
+     * address in translated guests (i.e. 8 GBytes). This restriction
+     * comes from wanting to map the P2M table into the 16MB RO_MPT hole
+     * in Xen's address space for translated PV guests.
+     * When using AMD's NPT on PAE Xen, we are restricted to 4GB.
+     */
     if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
                          L3_PAGETABLE_SHIFT - PAGE_SHIFT,
-                         (CONFIG_PAGING_LEVELS == 3
-                          ? 8
+                         ((CONFIG_PAGING_LEVELS == 3)
+                          ? (hvm_funcs.hap_supported ? 4 : 8)
                           : L3_PAGETABLE_ENTRIES),
                          PGT_l2_page_table) )
         goto out;
@@ -686,16 +688,26 @@ guest_physmap_remove_page(struct domain *d, unsigned long gfn,
     p2m_unlock(d);
 }
 
-void
+int
 guest_physmap_add_entry(struct domain *d, unsigned long gfn,
                         unsigned long mfn, p2m_type_t t)
 {
     unsigned long ogfn;
     p2m_type_t ot;
     mfn_t omfn;
+    int rc = 0;
 
     if ( !paging_mode_translate(d) )
-        return;
+        return -EINVAL;
+
+#if CONFIG_PAGING_LEVELS == 3
+    /* 32bit PAE nested paging does not support over 4GB guest due to
+     * hardware translation limit. This limitation is checked by comparing
+     * gfn with 0xfffffUL.
+     */
+    if ( paging_mode_hap(d) && (gfn > 0xfffffUL) )
+        return -EINVAL;
+#endif
 
     p2m_lock(d);
     audit_p2m(d);
@@ -735,18 +747,22 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
 
     if ( mfn_valid(_mfn(mfn)) )
     {
-        set_p2m_entry(d, gfn, _mfn(mfn), t);
+        if ( !set_p2m_entry(d, gfn, _mfn(mfn), t) )
+            rc = -EINVAL;
         set_gpfn_from_mfn(mfn, gfn);
     }
     else
     {
         gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
                  gfn, mfn);
-        set_p2m_entry(d, gfn, _mfn(INVALID_MFN), p2m_invalid);
+        if ( !set_p2m_entry(d, gfn, _mfn(INVALID_MFN), p2m_invalid) )
+            rc = -EINVAL;
     }
 
     audit_p2m(d);
     p2m_unlock(d);
+
+    return rc;
 }
 
 /* Walk the whole p2m table, changing any entries of the old type
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 783de29749..704497944a 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -131,7 +131,8 @@ static void populate_physmap(struct memop_args *a)
         if ( unlikely(paging_mode_translate(d)) )
         {
             for ( j = 0; j < (1 << a->extent_order); j++ )
-                guest_physmap_add_page(d, gpfn + j, mfn + j);
+                if ( guest_physmap_add_page(d, gpfn + j, mfn + j) )
+                    goto out;
         }
         else
         {
@@ -445,8 +446,9 @@ static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
             mfn = page_to_mfn(page);
             if ( unlikely(paging_mode_translate(d)) )
             {
+                /* Ignore failure here. There's nothing we can do. */
                 for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
-                    guest_physmap_add_page(d, gpfn + k, mfn + k);
+                    (void)guest_physmap_add_page(d, gpfn + k, mfn + k);
             }
             else
             {
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index c280169428..38a9cec7bd 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -201,14 +201,17 @@ int p2m_alloc_table(struct domain *d,
 void p2m_teardown(struct domain *d);
 
 /* Add a page to a domain's p2m table */
-void guest_physmap_add_entry(struct domain *d, unsigned long gfn,
+int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
                              unsigned long mfn, p2m_type_t t);
 
-/* Untyped version for RAM only, for compatibility */
-static inline void guest_physmap_add_page(struct domain *d, unsigned long gfn,
-                                           unsigned long mfn)
+/* Untyped version for RAM only, for compatibility
+ *
+ * Return 0 for success
+ */
+static inline int guest_physmap_add_page(struct domain *d, unsigned long gfn,
+                                          unsigned long mfn)
 {
-    guest_physmap_add_entry(d, gfn, mfn, p2m_ram_rw);
+    return guest_physmap_add_entry(d, gfn, mfn, p2m_ram_rw);
 }
 
 /* Remove a page from a domain's p2m table */
-- 
2.39.5
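
Aside on the 0xfffffUL bound used in guest_physmap_add_entry(): it follows directly
from the 4KB x86 page size. Guest frame 0xfffff is the last frame whose guest-physical
address still lies below 4GB, so any larger gfn needs addresses that a 3-level (PAE)
host-format NPT p2m cannot express. The standalone sketch below is only an illustration
of that arithmetic, not part of the patch; PAGE_SHIFT == 12 is an assumption matching
x86's 4KB pages.

/*
 * Illustration only (not part of the patch above): why gfn 0xfffff is the
 * last frame below 4GB when pages are 4KB (PAGE_SHIFT == 12 assumed).
 * Build standalone: cc -o gfn_bound gfn_bound.c && ./gfn_bound
 */
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4KB pages, as on x86 */

int main(void)
{
    uint64_t last_ok_gfn  = 0xfffffULL;              /* bound checked by the patch */
    uint64_t first_bad    = last_ok_gfn + 1;         /* first gfn the patch rejects */
    uint64_t first_bad_pa = first_bad << PAGE_SHIFT; /* its guest-physical address */

    /* Frame 0xfffff starts at 4GB - 4KB; frame 0x100000 starts exactly at 4GB,
     * which a 3-level (PAE) host-format NPT p2m cannot map. */
    assert(first_bad_pa == 0x100000000ULL);

    printf("gfn 0x%" PRIx64 " -> paddr 0x%" PRIx64 " (the 4GB boundary)\n",
           first_bad, first_bad_pa);
    return 0;
}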