x86/svm: add EFER SVME support for VGIF/VLOAD
author     Brian Woods <brian.woods@amd.com>
           Tue, 20 Feb 2018 22:27:02 +0000 (16:27 -0600)
committer  Andrew Cooper <andrew.cooper3@citrix.com>
           Wed, 21 Feb 2018 17:17:39 +0000 (17:17 +0000)
Only enable virtual VMLOAD/VMSAVE and VGIF if the guest's EFER.SVME is set.
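
Background for the gating: with EFER.SVME clear, the SVM instructions
(VMRUN, VMLOAD, VMSAVE, STGI, CLGI) raise #UD in the guest anyway, so the
hardware accelerations are only useful to a nested hypervisor that has set
the bit. As illustration only (not part of this patch), a minimal,
hypothetical guest-side sketch of the ring-0 EFER write that ends up
triggering the new code path, using GCC inline asm:

    #include <stdint.h>

    #define MSR_EFER  0xc0000080
    #define EFER_SVME (1ULL << 12)

    static inline uint64_t rdmsr(uint32_t msr)
    {
        uint32_t lo, hi;

        asm volatile ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );
        return ((uint64_t)hi << 32) | lo;
    }

    static inline void wrmsr(uint32_t msr, uint64_t val)
    {
        asm volatile ( "wrmsr" :: "c" (msr), "a" ((uint32_t)val),
                       "d" ((uint32_t)(val >> 32)) );
    }

    /* Setting SVME is what now makes Xen enable virtual VMLOAD/VMSAVE
     * and VGIF for this vCPU; clearing it disables them again. */
    static void guest_enable_svme(void)
    {
        wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
    }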

Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Brian Woods <brian.woods@amd.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
xen/arch/x86/hvm/svm/nestedsvm.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/svm/vmcb.c
xen/include/asm-x86/hvm/svm/nestedsvm.h

xen/arch/x86/hvm/svm/nestedsvm.c
index 1f5981fc18a2d893db25be790ee7a9e19b449a0c..6457532d4bab410553eafdcf46455c0c886fde04 100644
@@ -1665,3 +1665,69 @@ void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v)
 
     __update_guest_eip(regs, inst_len);
 }
+
+/*
+ * This runs on an EFER change, to see whether nested features need to be
+ * turned on or off.
+ */
+void svm_nested_features_on_efer_update(struct vcpu *v)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct nestedsvm *svm = &vcpu_nestedsvm(v);
+    u32 general2_intercepts;
+    vintr_t vintr;
+
+    /*
+     * State is needed for transferring the nested GIF status, so only
+     * write it when the guest's (hvm_vcpu) EFER.SVME changes.
+     */
+    if ( v->arch.hvm_vcpu.guest_efer & EFER_SVME )
+    {
+        if ( !vmcb->virt_ext.fields.vloadsave_enable &&
+             paging_mode_hap(v->domain) &&
+             cpu_has_svm_vloadsave )
+        {
+            vmcb->virt_ext.fields.vloadsave_enable = 1;
+            general2_intercepts  = vmcb_get_general2_intercepts(vmcb);
+            general2_intercepts &= ~(GENERAL2_INTERCEPT_VMLOAD |
+                                     GENERAL2_INTERCEPT_VMSAVE);
+            vmcb_set_general2_intercepts(vmcb, general2_intercepts);
+        }
+
+        if ( !vmcb->_vintr.fields.vgif_enable &&
+             cpu_has_svm_vgif )
+        {
+            vintr = vmcb_get_vintr(vmcb);
+            vintr.fields.vgif = svm->ns_gif;
+            vintr.fields.vgif_enable = 1;
+            vmcb_set_vintr(vmcb, vintr);
+            general2_intercepts  = vmcb_get_general2_intercepts(vmcb);
+            general2_intercepts &= ~(GENERAL2_INTERCEPT_STGI |
+                                     GENERAL2_INTERCEPT_CLGI);
+            vmcb_set_general2_intercepts(vmcb, general2_intercepts);
+        }
+    }
+    else
+    {
+        if ( vmcb->virt_ext.fields.vloadsave_enable )
+        {
+            vmcb->virt_ext.fields.vloadsave_enable = 0;
+            general2_intercepts  = vmcb_get_general2_intercepts(vmcb);
+            general2_intercepts |= (GENERAL2_INTERCEPT_VMLOAD |
+                                    GENERAL2_INTERCEPT_VMSAVE);
+            vmcb_set_general2_intercepts(vmcb, general2_intercepts);
+        }
+
+        if ( vmcb->_vintr.fields.vgif_enable )
+        {
+            vintr = vmcb_get_vintr(vmcb);
+            svm->ns_gif = vintr.fields.vgif;
+            vintr.fields.vgif_enable = 0;
+            vmcb_set_vintr(vmcb, vintr);
+            general2_intercepts  = vmcb_get_general2_intercepts(vmcb);
+            general2_intercepts |= (GENERAL2_INTERCEPT_STGI |
+                                    GENERAL2_INTERCEPT_CLGI);
+            vmcb_set_general2_intercepts(vmcb, general2_intercepts);
+        }
+    }
+}
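
Note the read-modify-write through vmcb_get_general2_intercepts() /
vmcb_set_general2_intercepts() instead of poking vmcb->_general2_intercepts
directly: the setters also mark the touched VMCB state dirty in the clean
bits, so the CPU reloads it on the next VMRUN. A simplified, self-contained
sketch of that accessor pattern (hypothetical names and bit position, not
the actual Xen macros):

    #include <stdint.h>

    struct vmcb_demo {
        uint32_t general2_intercepts;
        uint32_t cleanbits;      /* set bit => CPU may use its cached copy */
    };

    #define DEMO_CLEAN_INTERCEPTS (1u << 0)    /* illustrative bit only */

    static inline uint32_t demo_get_general2_intercepts(
        const struct vmcb_demo *vmcb)
    {
        return vmcb->general2_intercepts;
    }

    static inline void demo_set_general2_intercepts(struct vmcb_demo *vmcb,
                                                    uint32_t val)
    {
        vmcb->general2_intercepts = val;
        /* Force the CPU to re-read the intercepts on the next VMRUN. */
        vmcb->cleanbits &= ~DEMO_CLEAN_INTERCEPTS;
    }
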
xen/arch/x86/hvm/svm/svm.c
index 9f58afc2d81e9ac0daa0fefea7ccc97bfb338afc..64d2955b2225178994f4ad1409dfb9cd57e1575c 100644
@@ -631,6 +631,12 @@ static void svm_update_guest_efer(struct vcpu *v)
     if ( lma )
         new_efer |= EFER_LME;
     vmcb_set_efer(vmcb, new_efer);
+
+    ASSERT(nestedhvm_enabled(v->domain) ||
+           !(v->arch.hvm_vcpu.guest_efer & EFER_SVME));
+
+    if ( nestedhvm_enabled(v->domain) )
+        svm_nested_features_on_efer_update(v);
 }
 
 static void svm_cpuid_policy_changed(struct vcpu *v)
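
The new ASSERT encodes an invariant expected to be enforced on the MSR
write path: a guest without nested HVM enabled should never have been
allowed to set EFER.SVME in the first place. A minimal sketch of that kind
of filter (hypothetical helper and type, not the exact Xen code):

    #include <stdbool.h>
    #include <stdint.h>

    #define EFER_SVME (1ULL << 12)

    struct domain_caps {
        bool nested_hvm;     /* is nested virtualisation exposed? */
    };

    /* Reject a guest WRMSR to EFER with SVME set unless the domain has
     * nested HVM; this is what lets the ASSERT above hold. */
    static bool efer_write_valid(const struct domain_caps *caps,
                                 uint64_t new_efer)
    {
        return !(new_efer & EFER_SVME) || caps->nested_hvm;
    }
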
xen/arch/x86/hvm/svm/vmcb.c
index 0e6cba5b7bcc0a8a19176d540de7a7e9e642925a..997e7597e0c6c9b8b0f45cfc15ec2002e01cc35c 100644
@@ -200,29 +200,12 @@ static int construct_vmcb(struct vcpu *v)
 
         /* PAT is under complete control of SVM when using nested paging. */
         svm_disable_intercept_for_msr(v, MSR_IA32_CR_PAT);
-
-        /* Use virtual VMLOAD/VMSAVE if available. */
-        if ( cpu_has_svm_vloadsave )
-        {
-            vmcb->virt_ext.fields.vloadsave_enable = 1;
-            vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_VMLOAD;
-            vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_VMSAVE;
-        }
     }
     else
     {
         vmcb->_exception_intercepts |= (1U << TRAP_page_fault);
     }
 
-    /* if available, enable and configure virtual gif */
-    if ( cpu_has_svm_vgif )
-    {
-        vmcb->_vintr.fields.vgif = 1;
-        vmcb->_vintr.fields.vgif_enable = 1;
-        vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_STGI;
-        vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_CLGI;
-    }
-
     if ( cpu_has_pause_filter )
     {
         vmcb->_pause_filter_count = SVM_PAUSEFILTER_INIT;
xen/include/asm-x86/hvm/svm/nestedsvm.h
index a619b6131b682cc5d870fb708349ffbcb096a3aa..abcf2e7c9cd60c6b72882b992c23bd0dc79ae8d2 100644
@@ -104,6 +104,7 @@ nestedsvm_vmexit_n2n1(struct vcpu *v, struct cpu_user_regs *regs);
 enum nestedhvm_vmexits
 nestedsvm_check_intercepts(struct vcpu *v, struct cpu_user_regs *regs,
     uint64_t exitcode);
+void svm_nested_features_on_efer_update(struct vcpu *v);
 
 /* Interface methods */
 void nsvm_vcpu_destroy(struct vcpu *v);