ia64/xen-unstable

changeset 15369:1feb91894e11

[HVM] HAP tidying.
Tighten up locking discipline; remove dead/useless code and unnecessary VMEXITs.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Fri Jun 15 16:51:08 2007 +0100 (2007-06-15)
parents 56548d9a7ba7
children 093bc9dcbbca
files xen/arch/x86/hvm/svm/vmcb.c xen/arch/x86/mm/hap/hap.c xen/arch/x86/mm/p2m.c
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Fri Jun 15 16:30:10 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Fri Jun 15 16:51:08 2007 +0100
     1.3 @@ -236,6 +236,16 @@ static int construct_vmcb(struct vcpu *v
     1.4          vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
     1.5          vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
     1.6          vmcb->cr4 = arch_svm->cpu_shadow_cr4 = 0;
     1.7 +
     1.8 +        /* No point in intercepting CR0/3/4 reads, because the hardware 
     1.9 +         * will return the guest versions anyway. */
    1.10 +        vmcb->cr_intercepts &= ~(CR_INTERCEPT_CR0_READ
    1.11 +                                 |CR_INTERCEPT_CR3_READ
    1.12 +                                 |CR_INTERCEPT_CR4_READ);
    1.13 +
    1.14 +        /* No point in intercepting INVLPG if we don't have shadow pagetables 
    1.15 +         * that need to be fixed up. */
    1.16 +        vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_INVLPG;
    1.17      }
    1.18      else
    1.19      {
     2.1 --- a/xen/arch/x86/mm/hap/hap.c	Fri Jun 15 16:30:10 2007 +0100
     2.2 +++ b/xen/arch/x86/mm/hap/hap.c	Fri Jun 15 16:51:08 2007 +0100
     2.3 @@ -55,14 +55,14 @@
     2.4  /* hap code to call when log_dirty is enable. return 0 if no problem found. */
     2.5  int hap_enable_log_dirty(struct domain *d)
     2.6  {
     2.7 +    /* turn on PG_log_dirty bit in paging mode */
     2.8      hap_lock(d);
     2.9 -    /* turn on PG_log_dirty bit in paging mode */
    2.10      d->arch.paging.mode |= PG_log_dirty;
    2.11 +    hap_unlock(d);
    2.12 +
    2.13      /* set l1e entries of P2M table to NOT_WRITABLE. */
    2.14      p2m_set_flags_global(d, (_PAGE_PRESENT|_PAGE_USER));
    2.15 -    flush_tlb_all_pge();
    2.16 -    hap_unlock(d);
    2.17 -
    2.18 +    flush_tlb_mask(d->domain_dirty_cpumask);
    2.19      return 0;
    2.20  }
    2.21  
    2.22 @@ -70,19 +70,20 @@ int hap_disable_log_dirty(struct domain 
    2.23  {
    2.24      hap_lock(d);
    2.25      d->arch.paging.mode &= ~PG_log_dirty;
    2.26 +    hap_unlock(d);
    2.27 +
    2.28      /* set l1e entries of P2M table with normal mode */
    2.29 -    p2m_set_flags_global(d, __PAGE_HYPERVISOR|_PAGE_USER);
    2.30 -    hap_unlock(d);
    2.31 -    
    2.32 -    return 1;
    2.33 +    p2m_set_flags_global(d, __PAGE_HYPERVISOR|_PAGE_USER);    
    2.34 +    return 0;
    2.35  }
    2.36  
    2.37  void hap_clean_dirty_bitmap(struct domain *d)
    2.38  {
    2.39      /* mark physical memory as NOT_WRITEABLE and flush the TLB */
    2.40      p2m_set_flags_global(d, (_PAGE_PRESENT|_PAGE_USER));
    2.41 -    flush_tlb_all_pge();
    2.42 +    flush_tlb_mask(d->domain_dirty_cpumask);
    2.43  }
    2.44 +
    2.45  /************************************************/
    2.46  /*             HAP SUPPORT FUNCTIONS            */
    2.47  /************************************************/
    2.48 @@ -268,6 +269,7 @@ void hap_install_xen_entries_in_l2h(stru
    2.49  {
    2.50      struct domain *d = v->domain;
    2.51      l2_pgentry_t *sl2e;
    2.52 +    l3_pgentry_t *p2m;
    2.53  
    2.54      int i;
    2.55  
    2.56 @@ -290,23 +292,18 @@ void hap_install_xen_entries_in_l2h(stru
    2.57          sl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
    2.58              l2e_empty();
    2.59  
    2.60 -    if ( paging_mode_translate(d) )
    2.61 +    /* Install the domain-specific p2m table */
    2.62 +    ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
    2.63 +    p2m = hap_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
    2.64 +    for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
    2.65      {
    2.66 -        /* Install the domain-specific p2m table */
    2.67 -        l3_pgentry_t *p2m;
    2.68 -        ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
    2.69 -        p2m = hap_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
    2.70 -        for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
    2.71 -        {
    2.72 -            sl2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
    2.73 -                (l3e_get_flags(p2m[i]) & _PAGE_PRESENT)
    2.74 -                ? l2e_from_pfn(mfn_x(_mfn(l3e_get_pfn(p2m[i]))),
    2.75 -                                      __PAGE_HYPERVISOR)
    2.76 -                : l2e_empty();
    2.77 -        }
    2.78 -        hap_unmap_domain_page(p2m);
    2.79 +        sl2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
    2.80 +            (l3e_get_flags(p2m[i]) & _PAGE_PRESENT)
    2.81 +            ? l2e_from_pfn(mfn_x(_mfn(l3e_get_pfn(p2m[i]))),
    2.82 +                           __PAGE_HYPERVISOR)
    2.83 +            : l2e_empty();
    2.84      }
    2.85 -
    2.86 +    hap_unmap_domain_page(p2m);
    2.87      hap_unmap_domain_page(sl2e);
    2.88  }
    2.89  #endif
    2.90 @@ -565,61 +562,37 @@ void hap_vcpu_init(struct vcpu *v)
    2.91  /************************************************/
    2.92  /*          HAP PAGING MODE FUNCTIONS           */
    2.93  /************************************************/
    2.94 -/* In theory, hap should not intercept guest page fault. This function can 
    2.95 - * be recycled to handle host/nested page fault, if needed.
    2.96 +/* 
    2.97 + * HAP guests can handle page faults (in the guest page tables) without
    2.98 + * needing any action from Xen, so we should not be intercepting them.
    2.99   */
   2.100  int hap_page_fault(struct vcpu *v, unsigned long va, 
   2.101                     struct cpu_user_regs *regs)
   2.102  {
   2.103 -    HERE_I_AM;
   2.104 +    HAP_ERROR("Intercepted a guest #PF (%u:%u) with HAP enabled.\n",
   2.105 +              v->domain->domain_id, v->vcpu_id);
   2.106      domain_crash(v->domain);
   2.107      return 0;
   2.108  }
   2.109  
   2.110 -/* called when guest issues a invlpg request. 
   2.111 - * Return 1 if need to issue page invalidation on CPU; Return 0 if does not
   2.112 - * need to do so.
   2.113 +/* 
   2.114 + * HAP guests can handle invlpg without needing any action from Xen, so
   2.115 + * should not be intercepting it. 
   2.116   */
   2.117  int hap_invlpg(struct vcpu *v, unsigned long va)
   2.118  {
   2.119 -    HERE_I_AM;
   2.120 +    HAP_ERROR("Intercepted a guest INVLPG (%u:%u) with HAP enabled.\n",
   2.121 +              v->domain->domain_id, v->vcpu_id);
   2.122 +    domain_crash(v->domain);
   2.123      return 0;
   2.124  }
   2.125  
   2.126 +/*
   2.127 + * HAP guests do not need to take any action on CR3 writes (they are still
   2.128 + * intercepted, so that Xen's copy of the guest's CR3 can be kept in sync.)
   2.129 + */
   2.130  void hap_update_cr3(struct vcpu *v, int do_locking)
   2.131  {
   2.132 -    struct domain *d = v->domain;
   2.133 -    mfn_t gmfn;
   2.134 -
   2.135 -    HERE_I_AM;
   2.136 -    /* Don't do anything on an uninitialised vcpu */
   2.137 -    if ( !is_hvm_domain(d) && !v->is_initialised )
   2.138 -    {
   2.139 -        ASSERT(v->arch.cr3 == 0);
   2.140 -        return;
   2.141 -    }
   2.142 -
   2.143 -    if ( do_locking )
   2.144 -        hap_lock(v->domain);
   2.145 -    
   2.146 -    ASSERT(hap_locked_by_me(v->domain));
   2.147 -    ASSERT(v->arch.paging.mode);
   2.148 -    
   2.149 -    gmfn = pagetable_get_mfn(v->arch.guest_table);
   2.150 -
   2.151 -    make_cr3(v, pagetable_get_pfn(v->arch.monitor_table));
   2.152 -    
   2.153 -    hvm_update_guest_cr3(v, pagetable_get_paddr(v->arch.monitor_table));
   2.154 -
   2.155 -    HAP_PRINTK("d=%u v=%u guest_table=%05lx, monitor_table = %05lx\n", 
   2.156 -               d->domain_id, v->vcpu_id, 
   2.157 -               (unsigned long)pagetable_get_pfn(v->arch.guest_table),
   2.158 -               (unsigned long)pagetable_get_pfn(v->arch.monitor_table));
   2.159 -
   2.160 -    flush_tlb_mask(d->domain_dirty_cpumask);
   2.161 -
   2.162 -    if ( do_locking )
   2.163 -        hap_unlock(v->domain);
   2.164  }
   2.165  
   2.166  void hap_update_paging_modes(struct vcpu *v)
   2.167 @@ -647,7 +620,7 @@ void hap_update_paging_modes(struct vcpu
   2.168          v->arch.paging.mode = &hap_paging_real_mode;
   2.169      }
   2.170  
   2.171 -    v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);    
   2.172 +    v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);
   2.173  
   2.174      if ( pagetable_is_null(v->arch.monitor_table) ) {
   2.175          mfn_t mmfn = hap_make_monitor_table(v);
   2.176 @@ -655,7 +628,6 @@ void hap_update_paging_modes(struct vcpu
   2.177          make_cr3(v, mfn_x(mmfn));
   2.178      }
   2.179  
   2.180 -    flush_tlb_mask(d->domain_dirty_cpumask);
   2.181      hap_unlock(d);
   2.182  }
   2.183  
   2.184 @@ -702,29 +674,18 @@ void
   2.185  hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p,
   2.186                      l1_pgentry_t new, unsigned int level)
   2.187  {
   2.188 -    int do_locking;
   2.189 -
   2.190 -    /* This function can be called from two directions (P2M and log dirty). We
   2.191 -     *  need to make sure this lock has been held or not.
   2.192 -     */
   2.193 -    do_locking = !hap_locked_by_me(v->domain);
   2.194 -
   2.195 -    if ( do_locking )
   2.196 -        hap_lock(v->domain);
   2.197 +    hap_lock(v->domain);
   2.198  
   2.199      safe_write_pte(p, new);
   2.200  #if CONFIG_PAGING_LEVELS == 3
   2.201      /* install P2M in monitor table for PAE Xen */
   2.202 -    if ( level == 3 ) {
   2.203 +    if ( level == 3 ) 
   2.204  	/* We have written to the p2m l3: need to sync the per-vcpu
   2.205           * copies of it in the monitor tables */
   2.206  	p2m_install_entry_in_monitors(v->domain, (l3_pgentry_t *)p);
   2.207 -	
   2.208 -    }
   2.209  #endif
   2.210      
   2.211 -    if ( do_locking )
   2.212 -        hap_unlock(v->domain);
   2.213 +    hap_unlock(v->domain);
   2.214  }
   2.215  
   2.216  /* Entry points into this mode of the hap code. */
     3.1 --- a/xen/arch/x86/mm/p2m.c	Fri Jun 15 16:30:10 2007 +0100
     3.2 +++ b/xen/arch/x86/mm/p2m.c	Fri Jun 15 16:51:08 2007 +0100
     3.3 @@ -32,9 +32,13 @@
     3.4  #define P2M_AUDIT     0
     3.5  #define P2M_DEBUGGING 1
     3.6  
     3.7 -/* The P2M lock.  This protects all updates to the p2m table.
     3.8 +/*
     3.9 + * The P2M lock.  This protects all updates to the p2m table.
    3.10   * Updates are expected to be safe against concurrent reads, 
    3.11 - * which do *not* require the lock */
    3.12 + * which do *not* require the lock.
    3.13 + *
    3.14 + * Locking discipline: always acquire this lock before the shadow or HAP one
    3.15 + */
    3.16  
    3.17  #define p2m_lock_init(_d)                            \
    3.18      do {                                             \