xenbits.xensource.com Git - xen.git/commitdiff
vmx: wbinvd optimization for pass-through domain.
author: Keir Fraser <keir.fraser@citrix.com>
Fri, 16 Nov 2007 16:36:38 +0000 (16:36 +0000)
committer: Keir Fraser <keir.fraser@citrix.com>
Fri, 16 Nov 2007 16:36:38 +0000 (16:36 +0000)
Optimise wbinvd exit emulation for pass-through domains to avoid an
unconditional wbinvd whenever a VCPU is migrated. Instead, perform a
host wbinvd on all host CPUs when a wbinvd exit occurs.

Signed-off-by: Yaozu (Eddie) Dong <eddie.dong@intel.com>

xen/arch/x86/hvm/vmx/vmcs.c
xen/arch/x86/hvm/vmx/vmx.c
xen/include/asm-x86/hvm/vmx/vmcs.h

index 14d54496ba850933afe33ed5e3d0d264a1633767..03cf5cbc92d45a0c1b01b283483a54604a17ec11 100644 (file)
@@ -763,7 +763,7 @@ void vm_resume_fail(unsigned long eflags)
     domain_crash_synchronous();
 }
 
-static void flush_cache(void *info)
+static void wbinvd_ipi(void *info)
 {
     wbinvd();
 }
@@ -779,16 +779,21 @@ void vmx_do_resume(struct vcpu *v)
     }
     else
     {
-        /* For pass-through domain, guest PCI-E device driver may leverage the
-         * "Non-Snoop" I/O, and explicitly "WBINVD" or "CFLUSH" to a RAM space.
-         * In that case, if migration occurs before "WBINVD" or "CFLUSH", need
-         * to maintain data consistency.
+        /*
+         * For pass-through domain, guest PCI-E device driver may leverage the
+         * "Non-Snoop" I/O, and explicitly WBINVD or CLFLUSH to a RAM space.
+         * Since migration may occur before WBINVD or CLFLUSH, we need to
+         * maintain data consistency either by:
+         *  1: flushing cache (wbinvd) when the guest is scheduled out if
+         *     there is no wbinvd exit, or
+         *  2: execute wbinvd on all dirty pCPUs when guest wbinvd exits.
          */
-        if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) )
+        if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) &&
+             !cpu_has_wbinvd_exiting )
         {
             int cpu = v->arch.hvm_vmx.active_cpu;
             if ( cpu != -1 )
-                on_selected_cpus(cpumask_of_cpu(cpu), flush_cache, NULL, 1, 1);
+                on_selected_cpus(cpumask_of_cpu(cpu), wbinvd_ipi, NULL, 1, 1);
         }
 
         vmx_clear_vmcs(v);
index 348e69c5e0c18f052521e5386e249d6fa4658806..14884bd9bce11dd63b421e74f5dea05011a855ac 100644 (file)
@@ -2638,6 +2638,11 @@ static void vmx_do_extint(struct cpu_user_regs *regs)
     }
 }
 
+static void wbinvd_ipi(void *info)
+{
+    wbinvd();
+}
+
 static void vmx_failed_vmentry(unsigned int exit_reason,
                                struct cpu_user_regs *regs)
 {
@@ -2913,14 +2918,21 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
         __update_guest_eip(inst_len);
         if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) )
         {
-            wbinvd();
-            /* Disable further WBINVD intercepts. */
-            if ( (exit_reason == EXIT_REASON_WBINVD) &&
-                 (vmx_cpu_based_exec_control &
-                  CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) )
-                __vmwrite(SECONDARY_VM_EXEC_CONTROL,
-                          vmx_secondary_exec_control &
-                          ~SECONDARY_EXEC_WBINVD_EXITING);
+            if ( cpu_has_wbinvd_exiting )
+            {
+                on_each_cpu(wbinvd_ipi, NULL, 1, 1);
+            }
+            else
+            {
+                wbinvd();
+                /* Disable further WBINVD intercepts. */
+                if ( (exit_reason == EXIT_REASON_WBINVD) &&
+                     (vmx_cpu_based_exec_control &
+                      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) )
+                    __vmwrite(SECONDARY_VM_EXEC_CONTROL,
+                              vmx_secondary_exec_control &
+                              ~SECONDARY_EXEC_WBINVD_EXITING);
+            }
         }
         break;
     }
index d3da4e3549ba2a0b1870b1d55204a346dd318ec0..515eafdfbabc9469d44cdc178f0efd9fcd1c7022 100644 (file)
@@ -136,6 +136,8 @@ extern u32 vmx_secondary_exec_control;
 
 extern bool_t cpu_has_vmx_ins_outs_instr_info;
 
+#define cpu_has_wbinvd_exiting \
+    (vmx_secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING)
 #define cpu_has_vmx_virtualize_apic_accesses \
     (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
 #define cpu_has_vmx_tpr_shadow \