ia64/xen-unstable

changeset 15728:511c41a55045

hvm: More cleanups around paging interfaces.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Aug 08 10:34:03 2007 +0100 (2007-08-08)
parents 0f541efbb6d6
children da2c7dab1a3a
files xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/mm/hap/hap.c xen/include/asm-x86/hvm/svm/asid.h
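
The hvm.c hunk below folds the old early-exit/"success:" structure of hvm_set_cr3() into a single if/else chain, with each path calling paging_update_cr3() where it needs to. As a rough illustration only, here is a small self-contained C model of that four-way dispatch; the toy_* types and helpers are invented stand-ins for this sketch, not Xen's real interfaces, and only the case ordering and the points where paging_update_cr3() runs are taken from the patch.

    /* Toy model of the post-patch hvm_set_cr3() control flow.
     * All names here are hypothetical stand-ins, not Xen code. */
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_vcpu {
        bool hap_enabled;        /* stand-in for paging_mode_hap(v->domain)       */
        bool paging_enabled;     /* stand-in for hvm_paging_enabled(v)            */
        unsigned long guest_cr3; /* stand-in for v->arch.hvm_vcpu.guest_cr[3]     */
    };

    static void toy_paging_update_cr3(struct toy_vcpu *v)
    {
        /* In Xen this is paging_update_cr3(); here we just trace the call. */
        printf("paging_update_cr3: cr3=%#lx\n", v->guest_cr3);
    }

    static int toy_set_cr3(struct toy_vcpu *v, unsigned long value)
    {
        if (v->hap_enabled) {
            /* HAP mode: record the value; HAP-specific code does the work. */
            v->guest_cr3 = value;
            toy_paging_update_cr3(v);
        } else if (!v->paging_enabled) {
            /* Shadow mode, paging disabled: just remember the value. */
            v->guest_cr3 = value;
        } else if (value == v->guest_cr3) {
            /* Same CR3 rewritten: a TLB flush; invalidate the shadow.
             * (The real code also validates the MFN first.) */
            toy_paging_update_cr3(v);
        } else {
            /* New CR3: validate the PDBR (elided here), then make a new shadow. */
            v->guest_cr3 = value;
            toy_paging_update_cr3(v);
        }
        return 1;
    }

    int main(void)
    {
        struct toy_vcpu v = { .hap_enabled = false, .paging_enabled = true,
                              .guest_cr3 = 0x1000 };
        toy_set_cr3(&v, 0x1000); /* flush of the current page-table base */
        toy_set_cr3(&v, 0x2000); /* switch to a new page-table base      */
        return 0;
    }
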
line diff
     1.1 --- a/xen/arch/x86/hvm/hvm.c	Tue Aug 07 17:30:09 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/hvm.c	Wed Aug 08 10:34:03 2007 +0100
     1.3 @@ -527,30 +527,26 @@ int hvm_set_cr3(unsigned long value)
     1.4  
     1.5      if ( paging_mode_hap(v->domain) )
     1.6      {
     1.7 -        v->arch.hvm_vcpu.guest_cr[3] = value;
     1.8 -        hvm_update_guest_cr3(v, value);
     1.9 -        goto success;
    1.10 -    }
    1.11 -
    1.12 -    if ( !hvm_paging_enabled(v) )
    1.13 -    {
    1.14 +        /* HAP mode. HAP-specific code does all the hard work. */
    1.15          v->arch.hvm_vcpu.guest_cr[3] = value;
    1.16 -        goto success;
    1.17 +        paging_update_cr3(v);
    1.18      }
    1.19 -
    1.20 -    if ( value == v->arch.hvm_vcpu.guest_cr[3] )
    1.21 +    else if ( !hvm_paging_enabled(v) )
    1.22      {
    1.23 -        /* 
    1.24 -         * This is simple TLB flush, implying the guest has removed some
    1.25 -         * translation or changed page attributes. Invalidate the shadow.
    1.26 -         */
    1.27 +        /* Shadow-mode, paging disabled. Just update guest CR3 value. */
    1.28 +        v->arch.hvm_vcpu.guest_cr[3] = value;
    1.29 +    }
    1.30 +    else if ( value == v->arch.hvm_vcpu.guest_cr[3] )
    1.31 +    {
    1.32 +        /* Shadow-mode TLB flush. Invalidate the shadow. */
    1.33          mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
    1.34          if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
    1.35              goto bad_cr3;
    1.36 +        paging_update_cr3(v);
    1.37      }
    1.38      else 
    1.39      {
    1.40 -        /* Make a shadow. Check that the PDBR is valid first. */
    1.41 +        /* Shadow-mode CR3 change. Check PDBR and then make a new shadow. */
    1.42          HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
    1.43          mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
    1.44          if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
    1.45 @@ -564,11 +560,9 @@ int hvm_set_cr3(unsigned long value)
    1.46  
    1.47          v->arch.hvm_vcpu.guest_cr[3] = value;
    1.48          HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
    1.49 +        paging_update_cr3(v);
    1.50      }
    1.51  
    1.52 -    paging_update_cr3(v);
    1.53 -
    1.54 - success:
    1.55      return 1;
    1.56  
    1.57   bad_cr3:
     2.1 --- a/xen/arch/x86/hvm/svm/svm.c	Tue Aug 07 17:30:09 2007 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Aug 08 10:34:03 2007 +0100
     2.3 @@ -481,7 +481,6 @@ int svm_vmcb_restore(struct vcpu *v, str
     2.4      }
     2.5  
     2.6      paging_update_paging_modes(v);
     2.7 -    svm_asid_g_update_paging(v);
     2.8  
     2.9      return 0;
    2.10   
    2.11 @@ -1680,10 +1679,7 @@ static int svm_set_cr0(unsigned long val
    2.12          vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;
    2.13  
    2.14      if ( (value ^ old_value) & X86_CR0_PG )
    2.15 -    {
    2.16          paging_update_paging_modes(v);
    2.17 -        svm_asid_g_update_paging(v);
    2.18 -    }
    2.19  
    2.20      return 1;
    2.21  }
    2.22 @@ -1770,8 +1766,6 @@ static int mov_to_cr(int gpreg, int cr, 
    2.23              v->arch.hvm_vcpu.guest_cr[4] = value;
    2.24              vmcb->cr4 = value | (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
    2.25              paging_update_paging_modes(v);
    2.26 -            /* signal paging update to ASID handler */
    2.27 -            svm_asid_g_update_paging (v);
    2.28              break;
    2.29          }
    2.30  
    2.31 @@ -1796,8 +1790,6 @@ static int mov_to_cr(int gpreg, int cr, 
    2.32                  if ( old_base_mfn )
    2.33                      put_page(mfn_to_page(old_base_mfn));
    2.34                  paging_update_paging_modes(v);
    2.35 -                /* signal paging update to ASID handler */
    2.36 -                svm_asid_g_update_paging (v);
    2.37  
    2.38                  HVM_DBG_LOG(DBG_LEVEL_VMMU, 
    2.39                              "Update CR3 value = %lx, mfn = %lx",
    2.40 @@ -1821,11 +1813,7 @@ static int mov_to_cr(int gpreg, int cr, 
    2.41           * all TLB entries except global entries.
    2.42           */
    2.43          if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE))
    2.44 -        {
    2.45              paging_update_paging_modes(v);
    2.46 -            /* signal paging update to ASID handler */
    2.47 -            svm_asid_g_update_paging (v);
    2.48 -        }
    2.49          break;
    2.50  
    2.51      case 8:
    2.52 @@ -2206,8 +2194,7 @@ void svm_handle_invlpg(const short invlp
    2.53      HVMTRACE_3D(INVLPG, v, (invlpga?1:0), g_vaddr, (invlpga?regs->ecx:0));
    2.54  
    2.55      paging_invlpg(v, g_vaddr);
    2.56 -    /* signal invplg to ASID handler */
    2.57 -    svm_asid_g_invlpg (v, g_vaddr);
    2.58 +    svm_asid_g_invlpg(v, g_vaddr);
    2.59  }
    2.60  
    2.61  
     3.1 --- a/xen/arch/x86/mm/hap/hap.c	Tue Aug 07 17:30:09 2007 +0100
     3.2 +++ b/xen/arch/x86/mm/hap/hap.c	Wed Aug 08 10:34:03 2007 +0100
     3.3 @@ -603,38 +603,22 @@ static int hap_invlpg(struct vcpu *v, un
     3.4      return 0;
     3.5  }
     3.6  
     3.7 -/*
     3.8 - * HAP guests do not need to take any action on CR3 writes (they are still
     3.9 - * intercepted, so that Xen's copy of the guest's CR3 can be kept in sync.)
    3.10 - */
    3.11  static void hap_update_cr3(struct vcpu *v, int do_locking)
    3.12  {
    3.13 +    hvm_update_guest_cr3(v, v->arch.hvm_vcpu.guest_cr[3]);
    3.14  }
    3.15  
    3.16  static void hap_update_paging_modes(struct vcpu *v)
    3.17  {
    3.18 -    struct domain *d;
    3.19 +    struct domain *d = v->domain;
    3.20  
    3.21 -    d = v->domain;
    3.22      hap_lock(d);
    3.23  
    3.24 -    /* update guest paging mode. Note that we rely on hvm functions to detect
    3.25 -     * guest's paging mode. So, make sure the shadow registers (CR0, CR4, EFER)
    3.26 -     * reflect guest's status correctly.
    3.27 -     */
    3.28 -    if ( hvm_paging_enabled(v) )
    3.29 -    {
    3.30 -        if ( hvm_long_mode_enabled(v) )
    3.31 -            v->arch.paging.mode = &hap_paging_long_mode;
    3.32 -        else if ( hvm_pae_enabled(v) )
    3.33 -            v->arch.paging.mode = &hap_paging_pae_mode;
    3.34 -        else
    3.35 -            v->arch.paging.mode = &hap_paging_protected_mode;
    3.36 -    }
    3.37 -    else
    3.38 -    {
    3.39 -        v->arch.paging.mode = &hap_paging_real_mode;
    3.40 -    }
    3.41 +    v->arch.paging.mode =
    3.42 +        !hvm_paging_enabled(v)   ? &hap_paging_real_mode :
    3.43 +        hvm_long_mode_enabled(v) ? &hap_paging_long_mode :
    3.44 +        hvm_pae_enabled(v)       ? &hap_paging_pae_mode  :
    3.45 +                                   &hap_paging_protected_mode;
    3.46  
    3.47      v->arch.paging.translate_enabled = hvm_paging_enabled(v);
    3.48  
    3.49 @@ -643,8 +627,12 @@ static void hap_update_paging_modes(stru
    3.50          mfn_t mmfn = hap_make_monitor_table(v);
    3.51          v->arch.monitor_table = pagetable_from_mfn(mmfn);
    3.52          make_cr3(v, mfn_x(mmfn));
    3.53 +        hvm_update_host_cr3(v);
    3.54      }
    3.55  
    3.56 +    /* CR3 is effectively updated by a mode change. Flush ASIDs, etc. */
    3.57 +    hvm_update_guest_cr3(v, v->arch.hvm_vcpu.guest_cr[3]);
    3.58 +
    3.59      hap_unlock(d);
    3.60  }
    3.61  
     4.1 --- a/xen/include/asm-x86/hvm/svm/asid.h	Tue Aug 07 17:30:09 2007 +0100
     4.2 +++ b/xen/include/asm-x86/hvm/svm/asid.h	Wed Aug 08 10:34:03 2007 +0100
     4.3 @@ -32,11 +32,6 @@ void svm_asid_init_vcpu(struct vcpu *v);
     4.4  void svm_asid_inv_asid(struct vcpu *v);
     4.5  void svm_asid_inc_generation(void);
     4.6  
     4.7 -static inline void svm_asid_g_update_paging(struct vcpu *v)
     4.8 -{
     4.9 -    svm_asid_inv_asid(v);
    4.10 -}
    4.11 -
    4.12  static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
    4.13  {
    4.14  #if 0