domain_crash_synchronous();
}
-static void flush_cache(void *info)
+static void wbinvd_ipi(void *info)
{
wbinvd();
}
}
else
{
- /* For pass-through domain, guest PCI-E device driver may leverage the
- * "Non-Snoop" I/O, and explicitly "WBINVD" or "CFLUSH" to a RAM space.
- * In that case, if migration occurs before "WBINVD" or "CFLUSH", need
- * to maintain data consistency.
+ /*
+ * For a pass-through domain, the guest PCI-E device driver may leverage
+ * "Non-Snoop" I/O, and explicitly issue WBINVD or CLFLUSH against a RAM
+ * region.  Since migration may occur before the WBINVD or CLFLUSH, we
+ * need to maintain data consistency either by:
+ * 1: flushing the cache (wbinvd) when the guest is scheduled out, if
+ * there is no wbinvd exit, or
+ * 2: executing wbinvd on all dirty pCPUs when a guest wbinvd exits.
*/
- if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) )
+ if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) &&
+ !cpu_has_wbinvd_exiting )
{
int cpu = v->arch.hvm_vmx.active_cpu;
if ( cpu != -1 )
- on_selected_cpus(cpumask_of_cpu(cpu), flush_cache, NULL, 1, 1);
+ on_selected_cpus(cpumask_of_cpu(cpu), wbinvd_ipi, NULL, 1, 1);
}
vmx_clear_vmcs(v);
}
}
+static void wbinvd_ipi(void *info)
+{
+ wbinvd();
+}
+
static void vmx_failed_vmentry(unsigned int exit_reason,
struct cpu_user_regs *regs)
{
__update_guest_eip(inst_len);
if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) )
{
- wbinvd();
- /* Disable further WBINVD intercepts. */
- if ( (exit_reason == EXIT_REASON_WBINVD) &&
- (vmx_cpu_based_exec_control &
- CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) )
- __vmwrite(SECONDARY_VM_EXEC_CONTROL,
- vmx_secondary_exec_control &
- ~SECONDARY_EXEC_WBINVD_EXITING);
+ if ( cpu_has_wbinvd_exiting )
+ {
+ on_each_cpu(wbinvd_ipi, NULL, 1, 1);
+ }
+ else
+ {
+ wbinvd();
+ /* Disable further WBINVD intercepts. */
+ if ( (exit_reason == EXIT_REASON_WBINVD) &&
+ (vmx_cpu_based_exec_control &
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) )
+ __vmwrite(SECONDARY_VM_EXEC_CONTROL,
+ vmx_secondary_exec_control &
+ ~SECONDARY_EXEC_WBINVD_EXITING);
+ }
}
break;
}
extern bool_t cpu_has_vmx_ins_outs_instr_info;
+#define cpu_has_wbinvd_exiting \
+ (vmx_secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING)
#define cpu_has_vmx_virtualize_apic_accesses \
(vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
#define cpu_has_vmx_tpr_shadow \