nestedhvm: Allocate a separate host ASID for each L2 VCPU.
Author:     Keir Fraser <keir@xen.org>
AuthorDate: Fri, 15 Apr 2011 09:07:42 +0000 (10:07 +0100)
Commit:     Keir Fraser <keir@xen.org>
CommitDate: Fri, 15 Apr 2011 09:07:42 +0000 (10:07 +0100)
This avoids TLB flushing on every L1/L2 transition.

Signed-off-by: Keir Fraser <keir@xen.org>
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
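
Background: with a single ASID per vCPU, every transition between the L1 and
L2 context invalidated that one allocation, flushing the TLB each time.
Giving each context its own allocation (n1asid for the host/L1 context,
nv_n2asid for the nested L2 context) lets both sets of translations coexist
in the TLB. Below is a minimal standalone sketch of the generation-based
allocation scheme the patch builds on; the allocator harness and main() are
hypothetical stand-ins, and only the hvm_vcpu_asid fields mirror the patch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the per-context ASID state introduced by this patch. */
    struct hvm_vcpu_asid {
        uint64_t generation;   /* generation this allocation belongs to */
        uint32_t asid;         /* hardware ASID; 0 = none allocated */
    };

    /* Per-core allocator state (simplified stand-in for hvm_asid_data). */
    struct asid_core {
        uint64_t core_asid_generation;
        uint32_t next_asid;
        uint32_t max_asid;
    };

    /* Returns true when the caller must flush the TLB (new generation). */
    static bool asid_handle_vmenter(struct asid_core *c,
                                    struct hvm_vcpu_asid *a)
    {
        if ( a->generation == c->core_asid_generation )
            return false;                   /* allocation still valid: reuse */

        if ( c->next_asid > c->max_asid )   /* pool exhausted: new generation */
        {
            c->next_asid = 1;
            c->core_asid_generation++;
        }
        a->asid = c->next_asid++;
        a->generation = c->core_asid_generation;
        return a->asid == 1;                /* first ASID of a fresh generation */
    }

    int main(void)
    {
        struct asid_core core = { 1, 1, 4 };
        struct hvm_vcpu_asid n1 = { 0, 0 }, n2 = { 0, 0 };

        /* L1 and L2 entries each keep a distinct ASID, so L1->L2->L1
         * transitions no longer force a flush after the first entry. */
        printf("L1 entry: flush=%d asid=%u\n",
               asid_handle_vmenter(&core, &n1), n1.asid);
        printf("L2 entry: flush=%d asid=%u\n",
               asid_handle_vmenter(&core, &n2), n2.asid);
        printf("L1 again: flush=%d asid=%u\n",
               asid_handle_vmenter(&core, &n1), n1.asid);
        return 0;
    }

Running the sketch prints flush=1 only on the very first entry; the later
L2 entry and the return to L1 each reuse their own still-valid ASID.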
xen/arch/x86/hvm/asid.c
xen/arch/x86/hvm/svm/asid.c
xen/arch/x86/hvm/svm/nestedsvm.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/vmx/vmcs.c
xen/arch/x86/hvm/vmx/vmx.c
xen/include/asm-x86/hvm/asid.h
xen/include/asm-x86/hvm/vcpu.h
xen/include/asm-x86/hvm/vmx/vmx.h

diff --git a/xen/arch/x86/hvm/asid.c b/xen/arch/x86/hvm/asid.c
index 1cccaf53d5955a1aef7d2495d27c5d4ba82f6933..bfbf0d174e748bb0a602ffc4fbf35eeb7c58bc99 100644
--- a/xen/arch/x86/hvm/asid.c
+++ b/xen/arch/x86/hvm/asid.c
@@ -78,9 +78,15 @@ void hvm_asid_init(int nasids)
     data->next_asid = 1;
 }
 
+void hvm_asid_flush_vcpu_asid(struct hvm_vcpu_asid *asid)
+{
+    asid->generation = 0;
+}
+
 void hvm_asid_flush_vcpu(struct vcpu *v)
 {
-    v->arch.hvm_vcpu.asid_generation = 0;
+    hvm_asid_flush_vcpu_asid(&v->arch.hvm_vcpu.n1asid);
+    hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
 }
 
 void hvm_asid_flush_core(void)
@@ -102,9 +108,8 @@ void hvm_asid_flush_core(void)
     data->disabled = 1;
 }
 
-bool_t hvm_asid_handle_vmenter(void)
+bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid)
 {
-    struct vcpu *curr = current;
     struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
 
     /* On erratum #170 systems we must flush the TLB. 
@@ -113,7 +118,7 @@ bool_t hvm_asid_handle_vmenter(void)
         goto disabled;
 
     /* Test if VCPU has valid ASID. */
-    if ( curr->arch.hvm_vcpu.asid_generation == data->core_asid_generation )
+    if ( asid->generation == data->core_asid_generation )
         return 0;
 
     /* If there are no free ASIDs, need to go to a new generation */
@@ -126,17 +131,17 @@ bool_t hvm_asid_handle_vmenter(void)
     }
 
     /* Now guaranteed to be a free ASID. */
-    curr->arch.hvm_vcpu.asid = data->next_asid++;
-    curr->arch.hvm_vcpu.asid_generation = data->core_asid_generation;
+    asid->asid = data->next_asid++;
+    asid->generation = data->core_asid_generation;
 
     /*
      * When we assign ASID 1, flush all TLB entries as we are starting a new
      * generation, and all old ASID allocations are now stale. 
      */
-    return (curr->arch.hvm_vcpu.asid == 1);
+    return (asid->asid == 1);
 
  disabled:
-    curr->arch.hvm_vcpu.asid = 0;
+    asid->asid = 0;
     return 0;
 }
 
diff --git a/xen/arch/x86/hvm/svm/asid.c b/xen/arch/x86/hvm/svm/asid.c
index 1723866f11092beeb8c7a9bf20e041a4dc83b62b..ede2be6cc5b2ff099c617c0c7f3a74a11c8c87e2 100644
--- a/xen/arch/x86/hvm/svm/asid.c
+++ b/xen/arch/x86/hvm/svm/asid.c
@@ -22,6 +22,7 @@
 #include <xen/perfc.h>
 #include <asm/hvm/svm/asid.h>
 #include <asm/amd.h>
+#include <asm/hvm/nestedhvm.h>
 
 void svm_asid_init(struct cpuinfo_x86 *c)
 {
@@ -42,17 +43,20 @@ asmlinkage void svm_asid_handle_vmrun(void)
 {
     struct vcpu *curr = current;
     struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
-    bool_t need_flush = hvm_asid_handle_vmenter();
+    struct hvm_vcpu_asid *p_asid =
+        nestedhvm_vcpu_in_guestmode(curr)
+        ? &vcpu_nestedhvm(curr).nv_n2asid : &curr->arch.hvm_vcpu.n1asid;
+    bool_t need_flush = hvm_asid_handle_vmenter(p_asid);
 
     /* ASID 0 indicates that ASIDs are disabled. */
-    if ( curr->arch.hvm_vcpu.asid == 0 )
+    if ( p_asid->asid == 0 )
     {
         vmcb_set_guest_asid(vmcb, 1);
         vmcb->tlb_control = 1;
         return;
     }
 
-    vmcb_set_guest_asid(vmcb, curr->arch.hvm_vcpu.asid);
+    vmcb_set_guest_asid(vmcb, p_asid->asid);
     vmcb->tlb_control = need_flush;
 }
 
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 425112a37ee3994d9524e9131e4766048538b6a0..fe03ab2f2ce4ebe7ee4dd47dee78af6fc69a9a8d 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -261,8 +261,6 @@ int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
     /* Cleanbits */
     n1vmcb->cleanbits.bytes = 0;
 
-    hvm_asid_flush_vcpu(v);
-
     return 0;
 }
 
@@ -408,9 +406,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
     if (rc)
         return rc;
 
-    /* ASID */
-    hvm_asid_flush_vcpu(v);
-    /* n2vmcb->_guest_asid = ns_vmcb->_guest_asid; */
+    /* ASID - Emulation handled in hvm_asid_handle_vmenter() */
 
     /* TLB control */
     n2vmcb->tlb_control = n1vmcb->tlb_control | ns_vmcb->tlb_control;
@@ -605,9 +601,13 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs,
     svm->ns_vmcb_guestcr3 = ns_vmcb->_cr3;
     svm->ns_vmcb_hostcr3 = ns_vmcb->_h_cr3;
 
-    nv->nv_flushp2m = (ns_vmcb->tlb_control
-        || (svm->ns_guest_asid != ns_vmcb->_guest_asid));
-    svm->ns_guest_asid = ns_vmcb->_guest_asid;
+    nv->nv_flushp2m = ns_vmcb->tlb_control;
+    if ( svm->ns_guest_asid != ns_vmcb->_guest_asid )
+    {
+        nv->nv_flushp2m = 1;
+        hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
+        svm->ns_guest_asid = ns_vmcb->_guest_asid;
+    }
 
     /* nested paging for the guest */
     svm->ns_hap_enabled = (ns_vmcb->_np_enable) ? 1 : 0;
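
The vmentry hunk above also narrows when the nested ASID must be dropped:
only when L1 writes a different ASID into its VMCB do L2 translations tagged
with the previously allocated host ASID become stale. A small sketch of that
rule in isolation (the helper names here are hypothetical; hvm_vcpu_asid is
shaped as in the sketch near the top):

    #include <stdint.h>

    struct hvm_vcpu_asid {
        uint64_t generation;
        uint32_t asid;
    };

    /* Setting generation to 0 forces re-allocation at the next VM entry,
     * like hvm_asid_flush_vcpu_asid() in the patch. */
    static void asid_flush_one(struct hvm_vcpu_asid *a)
    {
        a->generation = 0;
    }

    /* Called on emulated VMRUN: flush nested state only on a real change. */
    static void on_l1_asid_write(uint32_t *ns_guest_asid,
                                 uint32_t new_guest_asid,
                                 struct hvm_vcpu_asid *n2asid,
                                 int *flushp2m)
    {
        if ( *ns_guest_asid != new_guest_asid )
        {
            *flushp2m = 1;              /* old nested mappings are stale */
            asid_flush_one(n2asid);     /* drop the host ASID backing L2 */
            *ns_guest_asid = new_guest_asid;
        }
    }
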
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 981e5c3217b40cefc267957ca7f49dfe0f36ce6a..b924c1e46f0bf2e1e221b7ec161d64f9aac61f39 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1580,6 +1580,15 @@ static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs)
     __update_guest_eip(regs, inst_len);
 }
 
+static void svm_invlpga_intercept(
+    struct vcpu *v, unsigned long vaddr, uint32_t asid)
+{
+    svm_invlpga(vaddr,
+                (asid == 0)
+                ? v->arch.hvm_vcpu.n1asid.asid
+                : vcpu_nestedhvm(v).nv_n2asid.asid);
+}
+
 static void svm_invlpg_intercept(unsigned long vaddr)
 {
     struct vcpu *curr = current;
@@ -1894,11 +1903,14 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
     case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
     case VMEXIT_CR0_WRITE ... VMEXIT_CR15_WRITE:
     case VMEXIT_INVLPG:
-    case VMEXIT_INVLPGA:
         if ( !handle_mmio() )
             hvm_inject_exception(TRAP_gp_fault, 0, 0);
         break;
 
+    case VMEXIT_INVLPGA:
+        svm_invlpga_intercept(v, regs->rax, regs->ecx);
+        break;
+
     case VMEXIT_VMMCALL:
         if ( (inst_len = __get_instruction_length(v, INSTR_VMCALL)) == 0 )
             break;
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 9b9921cf40d69ded242a2960eec6606e22418832..d139c1380db6dc6880e2a9fcbfac9ba8cb347fb7 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -867,9 +867,6 @@ static int construct_vmcs(struct vcpu *v)
 #endif
     }
 
-    if ( cpu_has_vmx_vpid )
-        __vmwrite(VIRTUAL_PROCESSOR_ID, v->arch.hvm_vcpu.asid);
-
     if ( cpu_has_vmx_pat && paging_mode_hap(d) )
     {
         u64 host_pat, guest_pat;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index cda6420f39aa70f46e2889b4f24f53085f2fab6f..2f4c74e2b4c59502ed2f31f255f2d0b04da8a97e 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2667,14 +2667,16 @@ asmlinkage void vmx_vmenter_helper(void)
 {
     struct vcpu *curr = current;
     u32 new_asid, old_asid;
+    struct hvm_vcpu_asid *p_asid;
     bool_t need_flush;
 
     if ( !cpu_has_vmx_vpid )
         goto out;
 
-    old_asid = curr->arch.hvm_vcpu.asid;
-    need_flush = hvm_asid_handle_vmenter();
-    new_asid = curr->arch.hvm_vcpu.asid;
+    p_asid = &curr->arch.hvm_vcpu.n1asid;
+    old_asid = p_asid->asid;
+    need_flush = hvm_asid_handle_vmenter(p_asid);
+    new_asid = p_asid->asid;
 
     if ( unlikely(new_asid != old_asid) )
     {
diff --git a/xen/include/asm-x86/hvm/asid.h b/xen/include/asm-x86/hvm/asid.h
index 4ee520f1dba80a4d0e3e7cdd30af507d199fd319..a01ebeb676304629dc64bed4345753c4da6fce03 100644
--- a/xen/include/asm-x86/hvm/asid.h
+++ b/xen/include/asm-x86/hvm/asid.h
@@ -23,11 +23,15 @@
 #include <xen/config.h>
 
 struct vcpu;
+struct hvm_vcpu_asid;
 
 /* Initialise ASID management for the current physical CPU. */
 void hvm_asid_init(int nasids);
 
-/* Invalidate a VCPU's current ASID allocation: forces re-allocation. */
+/* Invalidate a particular ASID allocation: forces re-allocation. */
+void hvm_asid_flush_vcpu_asid(struct hvm_vcpu_asid *asid);
+
+/* Invalidate all ASID allocations for specified VCPU: forces re-allocation. */
 void hvm_asid_flush_vcpu(struct vcpu *v);
 
 /* Flush all ASIDs on this processor core. */
@@ -35,7 +39,7 @@ void hvm_asid_flush_core(void);
 
 /* Called before entry to guest context. Checks ASID allocation, returns a
  * boolean indicating whether all ASIDs must be flushed. */
-bool_t hvm_asid_handle_vmenter(void);
+bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid);
 
 #endif /* __ASM_X86_HVM_ASID_H__ */
 
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index eabecaaccc7fdffac4c65668ab1c5cceeacbb99f..0282c01c3c84bfef90286316f4a4b4b4ff5d2859 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -37,6 +37,11 @@ enum hvm_io_state {
     HVMIO_completed
 };
 
+struct hvm_vcpu_asid {
+    uint64_t generation;
+    uint32_t asid;
+};
+
 #define VMCX_EADDR    (~0ULL)
 
 struct nestedvcpu {
@@ -57,6 +62,8 @@ struct nestedvcpu {
     bool_t nv_flushp2m; /* True, when p2m table must be flushed */
     struct p2m_domain *nv_p2m; /* used p2m table for this vcpu */
 
+    struct hvm_vcpu_asid nv_n2asid;
+
     bool_t nv_vmentry_pending;
     bool_t nv_vmexit_pending;
     bool_t nv_vmswitch_in_progress; /* true during vmentry/vmexit emulation */
@@ -100,8 +107,7 @@ struct hvm_vcpu {
     bool_t              hcall_preempted;
     bool_t              hcall_64bit;
 
-    uint64_t            asid_generation;
-    uint32_t            asid;
+    struct hvm_vcpu_asid n1asid;
 
     u32                 msr_tsc_aux;
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 8e685e4dc6c8d8342b12dcd610fb58a06bd2d78e..23406fa230acf88608f1cc1251f64971ec3709d4 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -377,7 +377,7 @@ static inline void vpid_sync_vcpu_gva(struct vcpu *v, unsigned long gva)
         type = INVVPID_ALL_CONTEXT;
 
 execute_invvpid:
-    __invvpid(type, v->arch.hvm_vcpu.asid, (u64)gva);
+    __invvpid(type, v->arch.hvm_vcpu.n1asid.asid, (u64)gva);
 }
 
 static inline void vpid_sync_all(void)