ia64/xen-unstable

changeset 16291:2717128cbdd1

hvm: Fail attempts to add pages to guest pseudophys memory map above
4GB when running with AMD NPT on PAE host.
From: Wei Huang <Wei.Huang2@amd.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Wed Oct 31 10:07:42 2007 +0000 (2007-10-31)
parents c7d5d229f191
children e2d76fb12ae2
files xen/arch/x86/hvm/hvm.c xen/arch/x86/mm/p2m.c xen/common/memory.c xen/include/asm-x86/p2m.h
--- a/xen/arch/x86/hvm/hvm.c	Wed Oct 31 09:36:45 2007 +0000
+++ b/xen/arch/x86/hvm/hvm.c	Wed Oct 31 10:07:42 2007 +0000
@@ -53,10 +53,11 @@
 /*
  * Xen command-line option to allow/disallow hardware-assisted paging.
  * Since the phys-to-machine table of AMD NPT is in host format, 32-bit Xen
- * could only support guests using NPT with up to a 4GB memory map. Therefore
- * we only allow HAP by default on 64-bit Xen.
+ * can only support guests using NPT with up to a 4GB memory map. Therefore
+ * we disallow HAP by default on PAE Xen (by default we want to support an
+ * 8GB pseudophysical memory map for HVM guests on a PAE host).
  */
-static int opt_hap_permitted = (BITS_PER_LONG == 8);
+static int opt_hap_permitted = (CONFIG_PAGING_LEVELS != 3);
 boolean_param("hap", opt_hap_permitted);
 
 int hvm_enabled __read_mostly;
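
A note on the default: CONFIG_PAGING_LEVELS is 3 on a PAE build of Xen and 4 on x86-64, so the new initialiser disables HAP by default only on PAE. The replaced test (BITS_PER_LONG == 8) could never be true, since BITS_PER_LONG is 32 or 64; it looks like a typo for BYTES_PER_LONG == 8, and it meant HAP was off by default on every build despite the old comment. A minimal standalone sketch of how the initialiser resolves; the fallback define exists only so the sketch compiles outside the Xen tree:

#include <stdio.h>

/* Assumed build constant: 3 on PAE Xen, 4 on x86-64 Xen. */
#ifndef CONFIG_PAGING_LEVELS
#define CONFIG_PAGING_LEVELS 3
#endif

static int opt_hap_permitted = (CONFIG_PAGING_LEVELS != 3);

int main(void)
{
    /* Prints 0 on a PAE build, 1 on an x86-64 build; the "hap"
     * boolean command-line option still overrides either default. */
    printf("hap permitted by default: %d\n", opt_hap_permitted);
    return 0;
}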
--- a/xen/arch/x86/mm/p2m.c	Wed Oct 31 09:36:45 2007 +0000
+++ b/xen/arch/x86/mm/p2m.c	Wed Oct 31 10:07:42 2007 +0000
@@ -219,15 +219,17 @@ set_p2m_entry(struct domain *d, unsigned
         goto out;
 #endif
 #if CONFIG_PAGING_LEVELS >= 3
-    // When using PAE Xen, we only allow 33 bits of pseudo-physical
-    // address in translated guests (i.e. 8 GBytes).  This restriction
-    // comes from wanting to map the P2M table into the 16MB RO_MPT hole
-    // in Xen's address space for translated PV guests.
-    //
+    /*
+     * When using PAE Xen, we only allow 33 bits of pseudo-physical
+     * address in translated guests (i.e. 8 GBytes).  This restriction
+     * comes from wanting to map the P2M table into the 16MB RO_MPT hole
+     * in Xen's address space for translated PV guests.
+     * When using AMD's NPT on PAE Xen, we are restricted to 4GB.
+     */
     if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
                          L3_PAGETABLE_SHIFT - PAGE_SHIFT,
-                         (CONFIG_PAGING_LEVELS == 3
-                          ? 8
+                         ((CONFIG_PAGING_LEVELS == 3)
+                          ? (hvm_funcs.hap_supported ? 4 : 8)
                           : L3_PAGETABLE_ENTRIES),
                          PGT_l2_page_table) )
         goto out;
@@ -686,16 +688,26 @@ guest_physmap_remove_page(struct domain 
     p2m_unlock(d);
 }
 
-void
+int
 guest_physmap_add_entry(struct domain *d, unsigned long gfn,
                         unsigned long mfn, p2m_type_t t)
 {
     unsigned long ogfn;
     p2m_type_t ot;
     mfn_t omfn;
+    int rc = 0;
 
     if ( !paging_mode_translate(d) )
-        return;
+        return -EINVAL;
+
+#if CONFIG_PAGING_LEVELS == 3
+    /* 32bit PAE nested paging does not support over 4GB guest due to
+     * hardware translation limit. This limitation is checked by comparing
+     * gfn with 0xfffffUL.
+     */
+    if ( paging_mode_hap(d) && (gfn > 0xfffffUL) )
+        return -EINVAL;
+#endif
 
     p2m_lock(d);
     audit_p2m(d);
@@ -735,18 +747,22 @@ guest_physmap_add_entry(struct domain *d
 
     if ( mfn_valid(_mfn(mfn)) )
     {
-        set_p2m_entry(d, gfn, _mfn(mfn), t);
+        if ( !set_p2m_entry(d, gfn, _mfn(mfn), t) )
+            rc = -EINVAL;
         set_gpfn_from_mfn(mfn, gfn);
     }
     else
     {
         gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
                  gfn, mfn);
-        set_p2m_entry(d, gfn, _mfn(INVALID_MFN), p2m_invalid);
+        if ( !set_p2m_entry(d, gfn, _mfn(INVALID_MFN), p2m_invalid) )
+            rc = -EINVAL;
     }
 
     audit_p2m(d);
     p2m_unlock(d);
+
+    return rc;
 }
 
 /* Walk the whole p2m table, changing any entries of the old type
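
A note on the two constants in these hunks: both fall out of the same 4KB-page arithmetic. Each PAE L3 entry maps 2^30 bytes (L3_PAGETABLE_SHIFT is 30), so the 8-entry allowance yields the 8GB pseudophysical ceiling while NPT's 4-entry limit yields 4GB; likewise gfn 0xfffff is the last 4KB frame below 4GB, which is exactly what the new guest_physmap_add_entry() check tests. A small standalone sketch of the arithmetic, assuming the standard x86 values of PAGE_SHIFT and L3_PAGETABLE_SHIFT:

#include <stdio.h>

#define PAGE_SHIFT          12  /* 4KB pages */
#define L3_PAGETABLE_SHIFT  30  /* each PAE L3 entry maps 1GB */

int main(void)
{
    unsigned long long per_l3_entry = 1ULL << L3_PAGETABLE_SHIFT;

    /* 8 L3 entries -> 8GB ceiling on PAE; 4 entries -> 4GB under NPT. */
    printf("8 entries: %lluGB, 4 entries: %lluGB\n",
           (8 * per_l3_entry) >> 30, (4 * per_l3_entry) >> 30);

    /* gfn 0xfffff is the last frame below 4GB: the first byte after it
     * is (0xfffff + 1) << 12 == 0x100000000, i.e. exactly 2^32. */
    printf("first byte past gfn 0xfffff: %#llx\n",
           (0xfffffULL + 1) << PAGE_SHIFT);
    return 0;
}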
--- a/xen/common/memory.c	Wed Oct 31 09:36:45 2007 +0000
+++ b/xen/common/memory.c	Wed Oct 31 10:07:42 2007 +0000
@@ -131,7 +131,8 @@ static void populate_physmap(struct memo
         if ( unlikely(paging_mode_translate(d)) )
         {
             for ( j = 0; j < (1 << a->extent_order); j++ )
-                guest_physmap_add_page(d, gpfn + j, mfn + j);
+                if ( guest_physmap_add_page(d, gpfn + j, mfn + j) )
+                    goto out;
         }
         else
         {
@@ -445,8 +446,9 @@ static long memory_exchange(XEN_GUEST_HA
             mfn = page_to_mfn(page);
             if ( unlikely(paging_mode_translate(d)) )
             {
+                /* Ignore failure here. There's nothing we can do. */
                 for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
-                    guest_physmap_add_page(d, gpfn + k, mfn + k);
+                    (void)guest_physmap_add_page(d, gpfn + k, mfn + k);
             }
             else
             {
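
The two call sites treat failure differently: populate_physmap() can simply stop the batch (the hypercall reports how many extents completed), whereas memory_exchange() has already freed the input pages by this point and can only ignore a late failure, which the (void) cast makes explicit. A standalone sketch of the two patterns, with a hypothetical stub standing in for the real guest_physmap_add_page():

#include <errno.h>
#include <stdio.h>

/* Hypothetical stub: fail for any frame at or above 4GB, as NPT on a
 * PAE host now does. */
static int guest_physmap_add_page(unsigned long gfn)
{
    return (gfn > 0xfffffUL) ? -EINVAL : 0;
}

int main(void)
{
    unsigned long gpfn = 0xffffeUL, j, k;

    /* populate_physmap() pattern: bail out on the first failure. */
    for ( j = 0; j < 4; j++ )
        if ( guest_physmap_add_page(gpfn + j) )
        {
            printf("stopped at gfn %#lx\n", gpfn + j);
            break;
        }

    /* memory_exchange() pattern: the input pages are already gone, so
     * a failure cannot be rolled back and is deliberately ignored. */
    for ( k = 0; k < 4; k++ )
        (void)guest_physmap_add_page(gpfn + k);

    return 0;
}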
--- a/xen/include/asm-x86/p2m.h	Wed Oct 31 09:36:45 2007 +0000
+++ b/xen/include/asm-x86/p2m.h	Wed Oct 31 10:07:42 2007 +0000
@@ -201,14 +201,17 @@ int p2m_alloc_table(struct domain *d,
 void p2m_teardown(struct domain *d);
 
 /* Add a page to a domain's p2m table */
-void guest_physmap_add_entry(struct domain *d, unsigned long gfn,
+int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
                              unsigned long mfn, p2m_type_t t);
 
-/* Untyped version for RAM only, for compatibility */
-static inline void guest_physmap_add_page(struct domain *d, unsigned long gfn,
-                                          unsigned long mfn)
+/* Untyped version for RAM only, for compatibility
+ *
+ * Return 0 for success
+ */
+static inline int guest_physmap_add_page(struct domain *d, unsigned long gfn,
+                                         unsigned long mfn)
 {
-    guest_physmap_add_entry(d, gfn, mfn, p2m_ram_rw);
+    return guest_physmap_add_entry(d, gfn, mfn, p2m_ram_rw);
 }
 
 /* Remove a page from a domain's p2m table */
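
With the header change the untyped wrapper just forwards the typed function's result, so every caller sees the same convention: 0 on success, -EINVAL on failure. A compilable sketch of that convention; the types and the rejection rule here are stand-ins for illustration only:

#include <errno.h>
#include <stdio.h>

typedef int p2m_type_t;        /* stand-in for the real p2m type enum */
#define p2m_ram_rw 0
struct domain;                 /* opaque in this sketch */

/* Stub for guest_physmap_add_entry(): reject frames above 4GB. */
static int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
                                   unsigned long mfn, p2m_type_t t)
{
    (void)d; (void)mfn; (void)t;
    return (gfn > 0xfffffUL) ? -EINVAL : 0;
}

/* The untyped wrapper now simply forwards the return value. */
static inline int guest_physmap_add_page(struct domain *d, unsigned long gfn,
                                         unsigned long mfn)
{
    return guest_physmap_add_entry(d, gfn, mfn, p2m_ram_rw);
}

int main(void)
{
    printf("below 4GB: %d\n", guest_physmap_add_page(NULL, 0xfffffUL, 0));
    printf("above 4GB: %d\n", guest_physmap_add_page(NULL, 0x100000UL, 0));
    return 0;
}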