x86/cpuid: update signature of hvm_cr4_guest_valid_bits()
author    Sergey Dyasli <sergey.dyasli@citrix.com>
Thu, 22 Mar 2018 11:32:36 +0000 (11:32 +0000)
committer Andrew Cooper <andrew.cooper3@citrix.com>
Thu, 5 Apr 2018 09:41:42 +0000 (10:41 +0100)
With the new CPUID infrastructure there is a domain-wide struct
cpuid_policy, so there is no longer any need to pass a separate
struct vcpu * into hvm_cr4_guest_valid_bits(). Make the function
accept a struct domain * instead and update all callers.

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
xen/arch/x86/hvm/domain.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/svm/svmdebug.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/hvm/vmx/vvmx.c
xen/include/asm-x86/hvm/hvm.h

diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
index 60474649de7bbab533e0720f3b0866dc0050bd90..ce15ce0470c91f4f740028d9a53a4376cc81f1fa 100644
@@ -111,6 +111,7 @@ static int check_segment(struct segment_register *reg, enum x86_segment seg)
 /* Called by VCPUOP_initialise for HVM guests. */
 int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
 {
+    const struct domain *d = v->domain;
     struct cpu_user_regs *uregs = &v->arch.user_regs;
     struct segment_register cs, ds, ss, es, tr;
     const char *errstr;
@@ -272,7 +273,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
     if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
         v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
 
-    if ( v->arch.hvm_vcpu.guest_cr[4] & ~hvm_cr4_guest_valid_bits(v, 0) )
+    if ( v->arch.hvm_vcpu.guest_cr[4] & ~hvm_cr4_guest_valid_bits(d, false) )
     {
         gprintk(XENLOG_ERR, "Bad CR4 value: %#016lx\n",
                 v->arch.hvm_vcpu.guest_cr[4]);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 1376ce594253fd95184eb2d4b4da8ef8394f5a75..9b1dddbfa6d935ccbe508dc4fa49e774fc070cd3 100644
@@ -940,9 +940,8 @@ const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
         X86_CR0_CD | X86_CR0_PG)))
 
 /* These bits in CR4 can be set by the guest. */
-unsigned long hvm_cr4_guest_valid_bits(const struct vcpu *v, bool restore)
+unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore)
 {
-    const struct domain *d = v->domain;
     const struct cpuid_policy *p;
     bool mce, vmxe;
 
@@ -1009,7 +1008,7 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
         return -EINVAL;
     }
 
-    if ( ctxt.cr4 & ~hvm_cr4_guest_valid_bits(v, 1) )
+    if ( ctxt.cr4 & ~hvm_cr4_guest_valid_bits(d, true) )
     {
         printk(XENLOG_G_ERR "HVM%d restore: bad CR4 %#" PRIx64 "\n",
                d->domain_id, ctxt.cr4);
@@ -2360,7 +2359,7 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
     struct vcpu *v = current;
     unsigned long old_cr;
 
-    if ( value & ~hvm_cr4_guest_valid_bits(v, 0) )
+    if ( value & ~hvm_cr4_guest_valid_bits(v->domain, false) )
     {
         HVM_DBG_LOG(DBG_LEVEL_1,
                     "Guest attempts to set reserved bit in CR4: %lx",
diff --git a/xen/arch/x86/hvm/svm/svmdebug.c b/xen/arch/x86/hvm/svm/svmdebug.c
index 091c58fa1b49a47c2dc5ef96de4b76dd36ebdba9..6c215d19fe019733a501f064c18d78085f64e5ad 100644
@@ -121,9 +121,9 @@ bool svm_vmcb_isvalid(const char *from, const struct vmcb_struct *vmcb,
            (cr3 >> v->domain->arch.cpuid->extd.maxphysaddr))) )
         PRINTF("CR3: MBZ bits are set (%#"PRIx64")\n", cr3);
 
-    if ( cr4 & ~hvm_cr4_guest_valid_bits(v, false) )
+    if ( cr4 & ~hvm_cr4_guest_valid_bits(v->domain, false) )
         PRINTF("CR4: invalid bits are set (%#"PRIx64", valid: %#"PRIx64")\n",
-               cr4, hvm_cr4_guest_valid_bits(v, false));
+               cr4, hvm_cr4_guest_valid_bits(v->domain, false));
 
     if ( vmcb_get_dr6(vmcb) >> 32 )
         PRINTF("DR6: bits [63:32] are not zero (%#"PRIx64")\n",
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index b2fdbf0ef03818c794b68d028c03fdea52b9bccc..d78ced9749a6c8a16f874235a5694dce9edea984 100644
@@ -1598,8 +1598,10 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
              * Update CR4 host mask to only trap when the guest tries to set
              * bits that are controlled by the hypervisor.
              */
-            v->arch.hvm_vmx.cr4_host_mask = HVM_CR4_HOST_MASK | X86_CR4_PKE |
-                                            ~hvm_cr4_guest_valid_bits(v, 0);
+            v->arch.hvm_vmx.cr4_host_mask =
+                (HVM_CR4_HOST_MASK | X86_CR4_PKE |
+                 ~hvm_cr4_guest_valid_bits(v->domain, false));
+
             v->arch.hvm_vmx.cr4_host_mask |= v->arch.hvm_vmx.vmx_realmode ?
                                              X86_CR4_VME : 0;
             v->arch.hvm_vmx.cr4_host_mask |= !hvm_paging_enabled(v) ?
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index dcd3b28f863b0d233562de1bf2694661afd3602f..43f7297c04fb250add72d89a3149f7bdfbd0c211 100644
@@ -2140,7 +2140,7 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
         data = X86_CR4_VMXE;
         break;
     case MSR_IA32_VMX_CR4_FIXED1:
-        data = hvm_cr4_guest_valid_bits(v, 0);
+        data = hvm_cr4_guest_valid_bits(d, false);
         break;
     case MSR_IA32_VMX_MISC:
         /* Do not support CR3-target feature now */
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 0775d0c214ff322090ae62e0f757f53c53fb44dd..ef5e198ebd0dea0de52aadfb8c02b25a53cacd00 100644
@@ -646,7 +646,7 @@ static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
 /* Check CR4/EFER values */
 const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
                            signed int cr0_pg);
-unsigned long hvm_cr4_guest_valid_bits(const struct vcpu *v, bool restore);
+unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore);
 
 /*
  * This must be defined as a macro instead of an inline function,