direct-io.hg

changeset 15387:739d698986e9

[HVM][SVM] flush all entries from guest ASIDs when Xen writes CR3.
This makes the assumptions about TLB flush behaviour in the page-type
system and the shadow code safe again, and fixes a corner case of NPT
log-dirty.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Wed Jun 20 10:55:37 2007 +0100 (2007-06-20)
parents fb5077ecf9a4
children 50358c4b37f4
files xen/arch/x86/flushtlb.c xen/arch/x86/hvm/svm/asid.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/support.h xen/include/asm-x86/hvm/svm/asid.h
line diff
     1.1 --- a/xen/arch/x86/flushtlb.c	Tue Jun 19 18:07:53 2007 +0100
     1.2 +++ b/xen/arch/x86/flushtlb.c	Wed Jun 20 10:55:37 2007 +0100
     1.3 @@ -80,6 +80,8 @@ void write_cr3(unsigned long cr3)
     1.4  
     1.5      t = pre_flush();
     1.6  
     1.7 +    hvm_flush_guest_tlbs();
     1.8 +
     1.9  #ifdef USER_MAPPINGS_ARE_GLOBAL
    1.10      __pge_off();
    1.11      __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
    1.12 @@ -103,6 +105,8 @@ void local_flush_tlb(void)
    1.13  
    1.14      t = pre_flush();
    1.15  
    1.16 +    hvm_flush_guest_tlbs();
    1.17 +
    1.18  #ifdef USER_MAPPINGS_ARE_GLOBAL
    1.19      __pge_off();
    1.20      __pge_on();
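
For context, the patched write_cr3() now reads roughly as follows.  This is a
sketch assembled from the hunk above, not a verbatim copy: pre_flush() and
post_flush() are the existing tlbflush-timestamp helpers in this file, and
the interrupt-flag handling is simplified.

    void write_cr3(unsigned long cr3)
    {
        unsigned long flags;
        u32 t;

        local_irq_save(flags);

        t = pre_flush();

        /* New in this changeset: invalidate all guest ASIDs, so no HVM
         * guest can retain stale TLB entries across this flush point. */
        hvm_flush_guest_tlbs();

    #ifdef USER_MAPPINGS_ARE_GLOBAL
        __pge_off();   /* toggling CR4.PGE flushes global mappings too */
        __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
        __pge_on();
    #else
        __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
    #endif

        post_flush(t);

        local_irq_restore(flags);
    }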
     2.1 --- a/xen/arch/x86/hvm/svm/asid.c	Tue Jun 19 18:07:53 2007 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/asid.c	Wed Jun 20 10:55:37 2007 +0100
     2.3 @@ -60,7 +60,7 @@ struct svm_asid_data {
     2.4     u64 core_asid_generation;
     2.5     u32 next_asid;
     2.6     u32 max_asid;
     2.7 -   u32 erratum170;
     2.8 +   u32 erratum170:1;
     2.9  };
    2.10  
    2.11  static DEFINE_PER_CPU(struct svm_asid_data, svm_asid_data);
    2.12 @@ -140,25 +140,21 @@ void svm_asid_init_vcpu(struct vcpu *v)
    2.13  }
    2.14  
    2.15  /*
    2.16 - * Increase the Generation to make free ASIDs.  Flush physical TLB and give
    2.17 - * ASID.
     2.18 + * Increase the generation to make fresh ASIDs available, and
     2.19 + * indirectly cause a TLB flush of all ASIDs on the next vmrun.
    2.20   */
    2.21 -static void svm_asid_handle_inc_generation(struct vcpu *v)
    2.22 +void svm_asid_inc_generation(void)
    2.23  {
    2.24      struct svm_asid_data *data = svm_asid_core_data();
    2.25  
    2.26 -    if ( likely(data->core_asid_generation <  SVM_ASID_LAST_GENERATION) )
    2.27 +    if ( likely(data->core_asid_generation < SVM_ASID_LAST_GENERATION) )
    2.28      {
    2.29 -        /* Handle ASID overflow. */
     2.30 +        /* Move to the next generation.  We can't flush the TLB now
     2.31 +         * because that can only be done on vmrun, and current might
     2.32 +         * not be an HVM vcpu, but the first HVM vcpu that runs after
     2.33 +         * this will pick up ASID 1 and flush the TLBs. */
    2.34          data->core_asid_generation++;
    2.35 -        data->next_asid = SVM_ASID_FIRST_GUEST_ASID + 1;
    2.36 -
    2.37 -        /* Handle VCPU. */
    2.38 -        v->arch.hvm_svm.vmcb->guest_asid = SVM_ASID_FIRST_GUEST_ASID;
    2.39 -        v->arch.hvm_svm.asid_generation  = data->core_asid_generation;
    2.40 -
    2.41 -        /* Trigger flush of physical TLB. */
    2.42 -        v->arch.hvm_svm.vmcb->tlb_control = 1;
    2.43 +        data->next_asid = SVM_ASID_FIRST_GUEST_ASID;
    2.44          return;
    2.45      }
    2.46  
    2.47 @@ -168,11 +164,12 @@ static void svm_asid_handle_inc_generati
    2.48       * this core (flushing TLB always). So correctness is established; it
    2.49       * only runs a bit slower.
    2.50       */
    2.51 -    printk("AMD SVM: ASID generation overrun. Disabling ASIDs.\n");
    2.52 -    data->erratum170 = 1;
    2.53 -    data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
    2.54 -
    2.55 -    svm_asid_init_vcpu(v);
    2.56 +    if ( !data->erratum170 )
    2.57 +    {
    2.58 +        printk("AMD SVM: ASID generation overrun. Disabling ASIDs.\n");
    2.59 +        data->erratum170 = 1;
    2.60 +        data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
    2.61 +    }
    2.62  }
    2.63  
    2.64  /*
    2.65 @@ -202,18 +199,21 @@ asmlinkage void svm_asid_handle_vmrun(vo
    2.66          return;
    2.67      }
    2.68  
    2.69 -    /* Different ASID generations trigger fetching of a fresh ASID. */
    2.70 -    if ( likely(data->next_asid <= data->max_asid) )
    2.71 -    {
    2.72 -        /* There is a free ASID. */
    2.73 -        v->arch.hvm_svm.vmcb->guest_asid = data->next_asid++;
    2.74 -        v->arch.hvm_svm.asid_generation  = data->core_asid_generation;
     2.75 +    /* If there are no free ASIDs, we need to move to a new generation. */
    2.76 +    if ( unlikely(data->next_asid > data->max_asid) )
    2.77 +        svm_asid_inc_generation();
    2.78 +
    2.79 +    /* Now guaranteed to be a free ASID. */
    2.80 +    v->arch.hvm_svm.vmcb->guest_asid = data->next_asid++;
    2.81 +    v->arch.hvm_svm.asid_generation  = data->core_asid_generation;
    2.82 +
    2.83 +    /* When we assign ASID 1, flush all TLB entries.  We need to do it 
    2.84 +     * here because svm_asid_inc_generation() can be called at any time, 
    2.85 +     * but the TLB flush can only happen on vmrun. */
    2.86 +    if ( v->arch.hvm_svm.vmcb->guest_asid == SVM_ASID_FIRST_GUEST_ASID )
    2.87 +        v->arch.hvm_svm.vmcb->tlb_control = 1;
    2.88 +    else
    2.89          v->arch.hvm_svm.vmcb->tlb_control = 0;
    2.90 -        return;
    2.91 -    }
    2.92 -
    2.93 -    /* Slow path, may cause TLB flush. */
    2.94 -    svm_asid_handle_inc_generation(v);
    2.95  }
    2.96  
    2.97  void svm_asid_inv_asid(struct vcpu *v)
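
Taken together, these hunks split the old slow path in two: the generation
roll-over (svm_asid_inc_generation(), now non-static) can be triggered from
anywhere, in particular from Xen's own CR3 writes via svm_flush_guest_tlbs()
below, while the physical TLB flush is deferred to the next vmrun.  A sketch
of the resulting vmrun-time logic, with the erratum-170 fallback and the
same-generation fast path elided:

    asmlinkage void svm_asid_handle_vmrun(void)
    {
        struct vcpu *v = current;
        struct svm_asid_data *data = svm_asid_core_data();

        /* ... erratum-170 and same-generation fast paths elided ... */

        /* If there are no free ASIDs, move to a new generation. */
        if ( unlikely(data->next_asid > data->max_asid) )
            svm_asid_inc_generation();

        /* Now guaranteed to be a free ASID. */
        v->arch.hvm_svm.vmcb->guest_asid = data->next_asid++;
        v->arch.hvm_svm.asid_generation  = data->core_asid_generation;

        /* Handing out the first ASID of a generation is the cue that a
         * deferred flush is pending: flush the whole TLB on this vmrun. */
        v->arch.hvm_svm.vmcb->tlb_control =
            (v->arch.hvm_svm.vmcb->guest_asid == SVM_ASID_FIRST_GUEST_ASID);
    }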
     3.1 --- a/xen/arch/x86/hvm/svm/svm.c	Tue Jun 19 18:07:53 2007 +0100
     3.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Jun 20 10:55:37 2007 +0100
     3.3 @@ -598,6 +598,14 @@ static void svm_update_guest_cr3(struct 
     3.4      v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; 
     3.5  }
     3.6  
     3.7 +static void svm_flush_guest_tlbs(void)
     3.8 +{
     3.9 +    /* Roll over the CPU's ASID generation, so it gets a clean TLB when we
    3.10 +     * next VMRUN.  (If ASIDs are disabled, the whole TLB is flushed on
    3.11 +     * VMRUN anyway). */
    3.12 +    svm_asid_inc_generation();
    3.13 +}
    3.14 +
    3.15  static void svm_update_vtpr(struct vcpu *v, unsigned long value)
    3.16  {
    3.17      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    3.18 @@ -948,6 +956,7 @@ static struct hvm_function_table svm_fun
    3.19      .get_segment_register = svm_get_segment_register,
    3.20      .update_host_cr3      = svm_update_host_cr3,
    3.21      .update_guest_cr3     = svm_update_guest_cr3,
    3.22 +    .flush_guest_tlbs     = svm_flush_guest_tlbs,
    3.23      .update_vtpr          = svm_update_vtpr,
    3.24      .stts                 = svm_stts,
    3.25      .set_tsc_offset       = svm_set_tsc_offset,
     4.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Tue Jun 19 18:07:53 2007 +0100
     4.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Jun 20 10:55:37 2007 +0100
     4.3 @@ -1138,6 +1138,12 @@ static void vmx_update_guest_cr3(struct 
     4.4      vmx_vmcs_exit(v);
     4.5  }
     4.6  
     4.7 +static void vmx_flush_guest_tlbs(void)
     4.8 +{
     4.9 +    /* No tagged TLB support on VMX yet.  The fact that we're in Xen 
    4.10 +     * at all means any guest will have a clean TLB when it's next run,
    4.11 +     * because VMRESUME will flush it for us. */
    4.12 +}
    4.13  
    4.14  static void vmx_inject_exception(
    4.15      unsigned int trapnr, int errcode, unsigned long cr2)
    4.16 @@ -1205,6 +1211,7 @@ static struct hvm_function_table vmx_fun
    4.17      .get_segment_register = vmx_get_segment_register,
    4.18      .update_host_cr3      = vmx_update_host_cr3,
    4.19      .update_guest_cr3     = vmx_update_guest_cr3,
    4.20 +    .flush_guest_tlbs     = vmx_flush_guest_tlbs,
    4.21      .update_vtpr          = vmx_update_vtpr,
    4.22      .stts                 = vmx_stts,
    4.23      .set_tsc_offset       = vmx_set_tsc_offset,
     5.1 --- a/xen/include/asm-x86/hvm/hvm.h	Tue Jun 19 18:07:53 2007 +0100
     5.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Wed Jun 20 10:55:37 2007 +0100
     5.3 @@ -124,6 +124,13 @@ struct hvm_function_table {
     5.4      void (*update_guest_cr3)(struct vcpu *v);
     5.5  
     5.6      /*
      5.7 +     * Called to ensure that all guest-specific mappings in a tagged TLB
      5.8 +     * are flushed; does *not* flush Xen's TLB entries, and on
      5.9 +     * processors without a tagged TLB it will be a no-op.
    5.10 +     */
    5.11 +    void (*flush_guest_tlbs)(void);
    5.12 +
    5.13 +    /*
    5.14       * Reflect the virtual APIC's value in the guest's V_TPR register
    5.15       */
    5.16      void (*update_vtpr)(struct vcpu *v, unsigned long value);
    5.17 @@ -148,6 +155,7 @@ struct hvm_function_table {
    5.18  };
    5.19  
    5.20  extern struct hvm_function_table hvm_funcs;
    5.21 +extern int hvm_enabled;
    5.22  
    5.23  int hvm_domain_initialise(struct domain *d);
    5.24  void hvm_domain_relinquish_resources(struct domain *d);
    5.25 @@ -231,6 +239,13 @@ hvm_update_vtpr(struct vcpu *v, unsigned
    5.26  
    5.27  void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3);
    5.28  
    5.29 +static inline void 
    5.30 +hvm_flush_guest_tlbs(void)
    5.31 +{
    5.32 +    if ( hvm_enabled )
    5.33 +        hvm_funcs.flush_guest_tlbs();
    5.34 +}
    5.35 +
    5.36  void hvm_hypercall_page_initialise(struct domain *d,
    5.37                                     void *hypercall_page);
    5.38  
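
The hvm_enabled test in this wrapper is what lets write_cr3() call
hvm_flush_guest_tlbs() unconditionally: CR3 writes happen long before any
HVM support is initialised, and hvm_funcs is only valid once the vendor code
has called hvm_enable().  A sketch of the assumed ordering (hvm_enable()'s
body here is illustrative, based only on the declarations visible in this
patch):

    struct hvm_function_table hvm_funcs;
    int hvm_enabled;

    /* Illustrative sketch: install the vendor table (svm_function_table
     * or vmx_function_table above) and flip hvm_enabled, after which
     * hvm_flush_guest_tlbs() starts dispatching to the real hook. */
    void hvm_enable(struct hvm_function_table *fns)
    {
        hvm_funcs   = *fns;
        hvm_enabled = 1;
    }

This is also why the declaration of hvm_enabled moves from support.h (removed
in the next hunk) to hvm.h: the inline wrapper needs it visible to every
caller of hvm_flush_guest_tlbs().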
     6.1 --- a/xen/include/asm-x86/hvm/support.h	Tue Jun 19 18:07:53 2007 +0100
     6.2 +++ b/xen/include/asm-x86/hvm/support.h	Wed Jun 20 10:55:37 2007 +0100
     6.3 @@ -215,7 +215,6 @@ int hvm_load(struct domain *d, hvm_domai
     6.4  /* End of save/restore */
     6.5  
     6.6  extern char hvm_io_bitmap[];
     6.7 -extern int hvm_enabled;
     6.8  
     6.9  void hvm_enable(struct hvm_function_table *);
    6.10  void hvm_disable(void);
     7.1 --- a/xen/include/asm-x86/hvm/svm/asid.h	Tue Jun 19 18:07:53 2007 +0100
     7.2 +++ b/xen/include/asm-x86/hvm/svm/asid.h	Wed Jun 20 10:55:37 2007 +0100
     7.3 @@ -30,6 +30,7 @@
     7.4  void svm_asid_init(struct cpuinfo_x86 *c);
     7.5  void svm_asid_init_vcpu(struct vcpu *v);
     7.6  void svm_asid_inv_asid(struct vcpu *v);
     7.7 +void svm_asid_inc_generation(void);
     7.8  
     7.9  /*
    7.10   * ASID related, guest triggered events.