From 9f0ac19b53cf8e7e09422b1fb3bb5cf7fffd7639 Mon Sep 17 00:00:00 2001
From: Keir Fraser
Date: Fri, 16 Nov 2007 16:36:38 +0000
Subject: [PATCH] vmx: wbinvd optimization for pass-through domain.

Optimise wbinvd exit emulation for pass-through domains to avoid
"always wbinvd" whenever a VCPU is migrated. Instead, execute wbinvd
on all host CPUs when a wbinvd exit occurs.

Signed-off-by: Yaozu (Eddie) Dong
---
 xen/arch/x86/hvm/vmx/vmcs.c        | 19 ++++++++++++-------
 xen/arch/x86/hvm/vmx/vmx.c         | 28 ++++++++++++++++++++--------
 xen/include/asm-x86/hvm/vmx/vmcs.h |  2 ++
 3 files changed, 34 insertions(+), 15 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 14d54496ba..03cf5cbc92 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -763,7 +763,7 @@ void vm_resume_fail(unsigned long eflags)
     domain_crash_synchronous();
 }
 
-static void flush_cache(void *info)
+static void wbinvd_ipi(void *info)
 {
     wbinvd();
 }
@@ -779,16 +779,21 @@ void vmx_do_resume(struct vcpu *v)
     }
     else
     {
-        /* For pass-through domain, guest PCI-E device driver may leverage the
-         * "Non-Snoop" I/O, and explicitly "WBINVD" or "CFLUSH" to a RAM space.
-         * In that case, if migration occurs before "WBINVD" or "CFLUSH", need
-         * to maintain data consistency.
+        /*
+         * For pass-through domain, guest PCI-E device driver may leverage the
+         * "Non-Snoop" I/O, and explicitly WBINVD or CLFLUSH to a RAM space.
+         * Since migration may occur before WBINVD or CLFLUSH, we need to
+         * maintain data consistency either by:
+         *  1: flushing the cache (wbinvd) when the guest is scheduled out,
+         *     if there is no wbinvd exit, or
+         *  2: executing wbinvd on all dirty pCPUs when the guest wbinvd exits.
          */
-        if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) )
+        if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) &&
+             !cpu_has_wbinvd_exiting )
         {
             int cpu = v->arch.hvm_vmx.active_cpu;
             if ( cpu != -1 )
-                on_selected_cpus(cpumask_of_cpu(cpu), flush_cache, NULL, 1, 1);
+                on_selected_cpus(cpumask_of_cpu(cpu), wbinvd_ipi, NULL, 1, 1);
         }
 
         vmx_clear_vmcs(v);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 348e69c5e0..14884bd9bc 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2638,6 +2638,11 @@ static void vmx_do_extint(struct cpu_user_regs *regs)
     }
 }
 
+static void wbinvd_ipi(void *info)
+{
+    wbinvd();
+}
+
 static void vmx_failed_vmentry(unsigned int exit_reason,
                                struct cpu_user_regs *regs)
 {
@@ -2913,14 +2918,21 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
         __update_guest_eip(inst_len);
         if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) )
         {
-            wbinvd();
-            /* Disable further WBINVD intercepts. */
-            if ( (exit_reason == EXIT_REASON_WBINVD) &&
-                 (vmx_cpu_based_exec_control &
-                  CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) )
-                __vmwrite(SECONDARY_VM_EXEC_CONTROL,
-                          vmx_secondary_exec_control &
-                          ~SECONDARY_EXEC_WBINVD_EXITING);
+            if ( cpu_has_wbinvd_exiting )
+            {
+                on_each_cpu(wbinvd_ipi, NULL, 1, 1);
+            }
+            else
+            {
+                wbinvd();
+                /* Disable further WBINVD intercepts. */
+                if ( (exit_reason == EXIT_REASON_WBINVD) &&
+                     (vmx_cpu_based_exec_control &
+                      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) )
+                    __vmwrite(SECONDARY_VM_EXEC_CONTROL,
+                              vmx_secondary_exec_control &
+                              ~SECONDARY_EXEC_WBINVD_EXITING);
+            }
         }
         break;
     }
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index d3da4e3549..515eafdfba 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -136,6 +136,8 @@ extern u32 vmx_secondary_exec_control;
 
 extern bool_t cpu_has_vmx_ins_outs_instr_info;
 
+#define cpu_has_wbinvd_exiting \
+    (vmx_secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING)
#define cpu_has_vmx_virtualize_apic_accesses \
     (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
 #define cpu_has_vmx_tpr_shadow \
-- 
2.39.5
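
The two paths the patch distinguishes can be summarised with a minimal
user-space sketch. This is not Xen code: the real wbinvd() is a privileged
instruction and on_each_cpu()/on_selected_cpus() are hypervisor IPI
primitives, so mock_wbinvd_on(), handle_wbinvd_exit(),
handle_vcpu_migration() and NR_HOST_CPUS below are all illustrative stand-ins
that only log what the hypervisor would do.

/*
 * Sketch of the patch's logic: if the CPU supports WBINVD exiting, a guest
 * WBINVD is emulated by broadcasting a flush to every host CPU, and no
 * flush is needed on vCPU migration; otherwise the hypervisor must
 * conservatively flush the previously-active CPU whenever the vCPU moves.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_HOST_CPUS 4

static bool cpu_has_wbinvd_exiting;  /* mock of the VMX capability bit */

/* Stands in for the wbinvd_ipi() handler the patch sends via IPI. */
static void mock_wbinvd_on(int cpu)
{
    printf("cpu%d: wbinvd\n", cpu);
}

/* Path taken on a guest WBINVD exit (vmx_vmexit_handler in the patch). */
static void handle_wbinvd_exit(void)
{
    if ( cpu_has_wbinvd_exiting )
    {
        /* Broadcast: any host CPU may hold dirty lines for the guest. */
        for ( int cpu = 0; cpu < NR_HOST_CPUS; cpu++ )
            mock_wbinvd_on(cpu);
    }
    else
    {
        /* Legacy path: flush locally (intercepts are then disabled). */
        mock_wbinvd_on(0);
    }
}

/* Path taken when a vCPU migrates (vmx_do_resume in the patch). */
static void handle_vcpu_migration(int old_cpu)
{
    /* Only needed when WBINVD exits are NOT available. */
    if ( !cpu_has_wbinvd_exiting && old_cpu != -1 )
        mock_wbinvd_on(old_cpu);
}

int main(void)
{
    cpu_has_wbinvd_exiting = true;
    handle_vcpu_migration(2);  /* no flush: wbinvd exits cover consistency */
    handle_wbinvd_exit();      /* flush broadcast to all host CPUs */
    return 0;
}

The design point the sketch captures is the trade: with WBINVD exiting
available, the (rare) guest wbinvd becomes expensive (a flush on every host
CPU), but the (frequent) vCPU migration becomes free, which is why the patch
gates the vmx_do_resume() flush on !cpu_has_wbinvd_exiting.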