ia64/xen-unstable

changeset 19237:07e65892fc8e

[VTD] Utilise the snoop control capability in shadow with VT-d code

We now compute the shadow PAT index for leaf page entries as follows:
1) No VT-d device assigned: leave the shadow PAT index as WB (already
handled in the shadow code).
2) Directly assigned MMIO area: have the shadow code compute the shadow
PAT with gMTRR=UC and the gPAT value.
3) Snoop control enabled: set the shadow PAT index to WB.
4) Snoop control disabled: have the shadow code compute the shadow
PAT with gMTRR and gPAT (already handled in the shadow code).

Signed-off-by: Xin, Xiaohui <xiaohui.xin@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Feb 20 11:11:40 2009 +0000 (2009-02-20)
parents 9559343fe5e8
children b749d0aba17f
files xen/arch/x86/hvm/mtrr.c xen/arch/x86/mm/shadow/multi.c xen/include/asm-x86/mtrr.h
line diff
     1.1 --- a/xen/arch/x86/hvm/mtrr.c	Fri Feb 20 11:09:46 2009 +0000
     1.2 +++ b/xen/arch/x86/hvm/mtrr.c	Fri Feb 20 11:11:40 2009 +0000
     1.3 @@ -351,11 +351,18 @@ static uint8_t page_pat_type(uint64_t pa
     1.4  static uint8_t effective_mm_type(struct mtrr_state *m,
     1.5                                   uint64_t pat,
     1.6                                   paddr_t gpa,
     1.7 -                                 uint32_t pte_flags)
     1.8 +                                 uint32_t pte_flags,
     1.9 +                                 uint8_t gmtrr_mtype)
    1.10  {
    1.11      uint8_t mtrr_mtype, pat_value, effective;
    1.12 -
    1.13 -    mtrr_mtype = get_mtrr_type(m, gpa);
    1.14 +   
    1.15 +    /* if get_pat_flags() gives a dedicated MTRR type,
    1.16 +     * just use it
    1.17 +     */ 
    1.18 +    if ( gmtrr_mtype == NO_HARDCODE_MEM_TYPE )
    1.19 +        mtrr_mtype = get_mtrr_type(m, gpa);
    1.20 +    else
    1.21 +        mtrr_mtype = gmtrr_mtype;
    1.22  
    1.23      pat_value = page_pat_type(pat, pte_flags);
    1.24  
    1.25 @@ -367,7 +374,8 @@ static uint8_t effective_mm_type(struct 
    1.26  uint32_t get_pat_flags(struct vcpu *v,
    1.27                         uint32_t gl1e_flags,
    1.28                         paddr_t gpaddr,
    1.29 -                       paddr_t spaddr)
    1.30 +                       paddr_t spaddr,
    1.31 +                       uint8_t gmtrr_mtype)
    1.32  {
    1.33      uint8_t guest_eff_mm_type;
    1.34      uint8_t shadow_mtrr_type;
    1.35 @@ -378,7 +386,8 @@ uint32_t get_pat_flags(struct vcpu *v,
    1.36      /* 1. Get the effective memory type of guest physical address,
    1.37       * with the pair of guest MTRR and PAT
    1.38       */
    1.39 -    guest_eff_mm_type = effective_mm_type(g, pat, gpaddr, gl1e_flags);
    1.40 +    guest_eff_mm_type = effective_mm_type(g, pat, gpaddr, 
    1.41 +                                          gl1e_flags, gmtrr_mtype);
    1.42      /* 2. Get the memory type of host physical address, with MTRR */
    1.43      shadow_mtrr_type = get_mtrr_type(&mtrr_state, spaddr);
    1.44  
     2.1 --- a/xen/arch/x86/mm/shadow/multi.c	Fri Feb 20 11:09:46 2009 +0000
     2.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Fri Feb 20 11:11:40 2009 +0000
     2.3 @@ -546,15 +546,32 @@ static always_inline void
     2.4           !is_xen_heap_mfn(mfn_x(target_mfn)) )
     2.5      {
     2.6          unsigned int type;
     2.7 +
     2.8 +        /* compute the PAT index for shadow page entry when VT-d is enabled
     2.9 +         * and device assigned. 
    2.10 +         * 1) direct MMIO: compute the PAT index with gMTRR=UC and gPAT.
    2.11 +         * 2) if enables snoop control, compute the PAT index as WB.
    2.12 +         * 3) if disables snoop control, compute the PAT index with
    2.13 +         *    gMTRR and gPAT.
    2.14 +         */
    2.15          if ( hvm_get_mem_pinned_cacheattr(d, gfn_x(target_gfn), &type) )
    2.16              sflags |= pat_type_2_pte_flags(type);
    2.17          else if ( d->arch.hvm_domain.is_in_uc_mode )
    2.18              sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
    2.19 +        else if ( p2mt == p2m_mmio_direct )
    2.20 +            sflags |= get_pat_flags(v,
    2.21 +                                    gflags,
    2.22 +                                    gfn_to_paddr(target_gfn),
    2.23 +                                    ((paddr_t)mfn_x(target_mfn)) << PAGE_SHIFT,
    2.24 +                                    MTRR_TYPE_UNCACHABLE); 
    2.25 +        else if ( iommu_snoop )
    2.26 +            sflags |= pat_type_2_pte_flags(PAT_TYPE_WRBACK);
    2.27          else
    2.28              sflags |= get_pat_flags(v,
    2.29                                      gflags,
    2.30                                      gfn_to_paddr(target_gfn),
    2.31 -                                    ((paddr_t)mfn_x(target_mfn)) << PAGE_SHIFT);
    2.32 +                                    ((paddr_t)mfn_x(target_mfn)) << PAGE_SHIFT,
    2.33 +                                    NO_HARDCODE_MEM_TYPE);
    2.34      }
    2.35  
    2.36      // Set the A&D bits for higher level shadows.
     3.1 --- a/xen/include/asm-x86/mtrr.h	Fri Feb 20 11:09:46 2009 +0000
     3.2 +++ b/xen/include/asm-x86/mtrr.h	Fri Feb 20 11:11:40 2009 +0000
     3.3 @@ -11,6 +11,7 @@
     3.4  #define MTRR_TYPE_WRBACK     6
     3.5  #define MTRR_NUM_TYPES       7
     3.6  #define MEMORY_NUM_TYPES     MTRR_NUM_TYPES
     3.7 +#define NO_HARDCODE_MEM_TYPE    MTRR_NUM_TYPES
     3.8  
     3.9  #define NORMAL_CACHE_MODE          0
    3.10  #define NO_FILL_CACHE_MODE         2
    3.11 @@ -63,7 +64,7 @@ extern int mtrr_del(int reg, unsigned lo
    3.12  extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
    3.13  extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
    3.14  extern u32 get_pat_flags(struct vcpu *v, u32 gl1e_flags, paddr_t gpaddr,
    3.15 -                  paddr_t spaddr);
    3.16 +                  paddr_t spaddr, uint8_t gmtrr_mtype);
    3.17  extern uint8_t epte_get_entry_emt(
    3.18      struct domain *d, unsigned long gfn, unsigned long mfn,
    3.19      uint8_t *igmt, int direct_mmio);