From: Kai Huang
Date: Wed, 21 Oct 2015 08:49:16 +0000 (+0200)
Subject: x86/ept: defer enabling of EPT A/D bit until PML gets enabled
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=2ed8b8363269554a3b91e99b3acdecac52f8b500;p=people%2Froyger%2Fxen.git

x86/ept: defer enabling of EPT A/D bit until PML gets enabled

The existing PML implementation turns on the EPT A/D bit unconditionally if
PML is supported by hardware. This works, but enabling the EPT A/D bit can be
deferred until PML is actually enabled. There is no point in enabling the
extra feature for every domain when we are not meaning to use it (yet).

Also add an ASSERT that the domain has been paused to ept_flush_pml_buffers,
to make it consistent with ept_enable{disable}_pml.

Sanity tests of live migration and GUI display were done on a Broadwell
machine.

Suggested-by: Jan Beulich
Signed-off-by: Kai Huang
Reviewed-by: Jan Beulich
Reviewed-by: Andrew Cooper
Acked-by: Kevin Tian
---

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 3592a88847..c11f3ecb7f 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1553,6 +1553,30 @@ void vmx_domain_flush_pml_buffers(struct domain *d)
         vmx_vcpu_flush_pml_buffer(v);
 }
 
+static void vmx_vcpu_update_eptp(struct vcpu *v, u64 eptp)
+{
+    vmx_vmcs_enter(v);
+    __vmwrite(EPT_POINTER, eptp);
+    vmx_vmcs_exit(v);
+}
+
+/*
+ * Update EPTP data to VMCS of all vcpus of the domain. Must be called when
+ * domain is paused.
+ */
+void vmx_domain_update_eptp(struct domain *d)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    struct vcpu *v;
+
+    ASSERT(atomic_read(&d->pause_count));
+
+    for_each_vcpu ( d, v )
+        vmx_vcpu_update_eptp(v, ept_get_eptp(&p2m->ept));
+
+    ept_sync_domain(p2m);
+}
+
 int vmx_create_vmcs(struct vcpu *v)
 {
     struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 74ce9e0262..86440fc1e0 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1129,21 +1129,39 @@ void ept_sync_domain(struct p2m_domain *p2m)
 
 static void ept_enable_pml(struct p2m_domain *p2m)
 {
+    /* Domain must have been paused */
+    ASSERT(atomic_read(&p2m->domain->pause_count));
+
     /*
-     * No need to check if vmx_domain_enable_pml has succeeded or not, as
+     * No need to return whether vmx_domain_enable_pml has succeeded, as
      * ept_p2m_type_to_flags will do the check, and write protection will be
      * used if PML is not enabled.
      */
-    vmx_domain_enable_pml(p2m->domain);
+    if ( vmx_domain_enable_pml(p2m->domain) )
+        return;
+
+    /* Enable EPT A/D bit for PML */
+    p2m->ept.ept_ad = 1;
+    vmx_domain_update_eptp(p2m->domain);
 }
 
 static void ept_disable_pml(struct p2m_domain *p2m)
 {
+    /* Domain must have been paused */
+    ASSERT(atomic_read(&p2m->domain->pause_count));
+
     vmx_domain_disable_pml(p2m->domain);
+
+    /* Disable EPT A/D bit */
+    p2m->ept.ept_ad = 0;
+    vmx_domain_update_eptp(p2m->domain);
 }
 
 static void ept_flush_pml_buffers(struct p2m_domain *p2m)
 {
+    /* Domain must have been paused */
+    ASSERT(atomic_read(&p2m->domain->pause_count));
+
     vmx_domain_flush_pml_buffers(p2m->domain);
 }
 
@@ -1166,8 +1184,6 @@ int ept_p2m_init(struct p2m_domain *p2m)
 
     if ( cpu_has_vmx_pml )
     {
-        /* Enable EPT A/D bits if we are going to use PML. */
-        ept->ept_ad = cpu_has_vmx_pml ? 1 : 0;
         p2m->enable_hardware_log_dirty = ept_enable_pml;
         p2m->disable_hardware_log_dirty = ept_disable_pml;
         p2m->flush_hardware_cached_dirty = ept_flush_pml_buffers;
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 1a71de65ac..865d9fc51e 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -517,6 +517,8 @@ int vmx_domain_enable_pml(struct domain *d);
 void vmx_domain_disable_pml(struct domain *d);
 void vmx_domain_flush_pml_buffers(struct domain *d);
 
+void vmx_domain_update_eptp(struct domain *d);
+
 #endif /* ASM_X86_HVM_VMX_VMCS_H__ */
 
 /*