xenbits.xensource.com Git - xen.git/commitdiff
Revert "x86: make Viridian support optional"
author Jan Beulich <jbeulich@suse.com>
Mon, 24 Mar 2025 13:36:57 +0000 (14:36 +0100)
committer Jan Beulich <jbeulich@suse.com>
Mon, 24 Mar 2025 13:36:57 +0000 (14:36 +0100)
This reverts commit e0cf36bf295b40cac71af26b35eedee216e156ff. It
introduced not just UBSAN failures, but apparently actual NULL
de-references.

xen/arch/x86/hvm/Kconfig
xen/arch/x86/hvm/Makefile
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/vlapic.c
xen/arch/x86/include/asm/hvm/domain.h
xen/arch/x86/include/asm/hvm/hvm.h
xen/arch/x86/include/asm/hvm/vcpu.h

index 4b4f07495bae7feb5d8ae4cc4e1553e03d3b5dc8..2def0f98e26e5a0bf4cce4a81bf8a43ee65c02ee 100644 (file)
@@ -63,16 +63,6 @@ config ALTP2M
 
          If unsure, stay with defaults.
 
-config HVM_VIRIDIAN
-       bool "Hyper-V enlightenments for guests" if EXPERT
-       default y
-       help
-         Support optimizations for Hyper-V guests such as faster hypercalls,
-         efficient timer and interrupt handling, and enhanced paravirtualized
-         I/O. This is to improve performance and compatibility of Windows VMs.
-
-         If unsure, say Y.
-
 config MEM_PAGING
        bool "Xen memory paging support (UNSUPPORTED)" if UNSUPPORTED
        depends on VM_EVENT
index 6cc2e74fc49bf97310ab9046ce3a335111c4c245..4c1fa5c6c2bf75d336b39f343241bfced5b91b09 100644 (file)
@@ -1,6 +1,6 @@
 obj-$(CONFIG_AMD_SVM) += svm/
 obj-$(CONFIG_INTEL_VMX) += vmx/
-obj-$(CONFIG_HVM_VIRIDIAN) += viridian/
+obj-y += viridian/
 
 obj-y += asid.o
 obj-y += dm.o
index 529068e80ce4b1a59fb135446bb8d3924371cf5b..5950f3160fe5a11dcbd436e70c54fe6a85054ae3 100644 (file)
@@ -695,12 +695,9 @@ int hvm_domain_initialise(struct domain *d,
     if ( hvm_tsc_scaling_supported )
         d->arch.hvm.tsc_scaling_ratio = hvm_default_tsc_scaling_ratio;
 
-    if ( is_viridian_domain(d) )
-    {
-        rc = viridian_domain_init(d);
-        if ( rc )
-            goto fail2;
-    }
+    rc = viridian_domain_init(d);
+    if ( rc )
+        goto fail2;
 
     rc = alternative_call(hvm_funcs.domain_initialise, d);
     if ( rc != 0 )
@@ -736,8 +733,7 @@ void hvm_domain_relinquish_resources(struct domain *d)
     if ( hvm_funcs.nhvm_domain_relinquish_resources )
         alternative_vcall(hvm_funcs.nhvm_domain_relinquish_resources, d);
 
-    if ( is_viridian_domain(d) )
-        viridian_domain_deinit(d);
+    viridian_domain_deinit(d);
 
     ioreq_server_destroy_all(d);
 
@@ -1641,12 +1637,9 @@ int hvm_vcpu_initialise(struct vcpu *v)
          && (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
         goto fail5;
 
-    if ( is_viridian_domain(d) )
-    {
-        rc = viridian_vcpu_init(v);
-        if ( rc )
-            goto fail6;
-    }
+    rc = viridian_vcpu_init(v);
+    if ( rc )
+        goto fail6;
 
     rc = ioreq_server_add_vcpu_all(d, v);
     if ( rc != 0 )
@@ -1676,15 +1669,13 @@ int hvm_vcpu_initialise(struct vcpu *v)
  fail2:
     hvm_vcpu_cacheattr_destroy(v);
  fail1:
-    if ( is_viridian_domain(d) )
-        viridian_vcpu_deinit(v);
+    viridian_vcpu_deinit(v);
     return rc;
 }
 
 void hvm_vcpu_destroy(struct vcpu *v)
 {
-    if ( is_viridian_domain(v->domain) )
-        viridian_vcpu_deinit(v);
+    viridian_vcpu_deinit(v);
 
     ioreq_server_remove_vcpu_all(v->domain, v);
 
index 3d76ce3f0da919a783ee19ebc91538310ec0cebf..065b2aab5b20bdce63e4f4b2eef0f4a544f8afdb 100644 (file)
@@ -426,8 +426,7 @@ void vlapic_EOI_set(struct vlapic *vlapic)
      * priority vector and then recurse to handle the lower priority
      * vector.
      */
-    bool missed_eoi = has_viridian_apic_assist(v->domain) &&
-                      viridian_apic_assist_completed(v);
+    bool missed_eoi = viridian_apic_assist_completed(v);
     int vector;
 
  again:
@@ -443,7 +442,7 @@ void vlapic_EOI_set(struct vlapic *vlapic)
      * NOTE: It is harmless to call viridian_apic_assist_clear() on a
      *       recursion, even though it is not necessary.
      */
-    if ( has_viridian_apic_assist(v->domain) && !missed_eoi )
+    if ( !missed_eoi )
         viridian_apic_assist_clear(v);
 
     vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]);
@@ -1361,8 +1360,7 @@ int vlapic_has_pending_irq(struct vcpu *v)
      * If so, we need to emulate the EOI here before comparing ISR
      * with IRR.
      */
-    if ( has_viridian_apic_assist(v->domain) &&
-         viridian_apic_assist_completed(v) )
+    if ( viridian_apic_assist_completed(v) )
         vlapic_EOI_set(vlapic);
 
     isr = vlapic_find_highest_isr(vlapic);
@@ -1375,8 +1373,7 @@ int vlapic_has_pending_irq(struct vcpu *v)
     if ( isr >= 0 &&
          (irr & 0xf0) <= (isr & 0xf0) )
     {
-        if ( has_viridian_apic_assist(v->domain) )
-            viridian_apic_assist_clear(v);
+        viridian_apic_assist_clear(v);
         return -1;
     }
 
index 07eec231f0238760f4a5c1fddc251dd6410a5b2a..333501d5f2ac01676646b9b277b551f06d43c3a5 100644 (file)
@@ -111,9 +111,7 @@ struct hvm_domain {
     /* hypervisor intercepted msix table */
     struct list_head       msixtbl_list;
 
-#ifdef CONFIG_HVM_VIRIDIAN
     struct viridian_domain *viridian;
-#endif
 
     /*
      * TSC value that VCPUs use to calculate their tsc_offset value.
index 9c30457eceb18b42892cfe2fea0127af6ded6887..963e8201130a9c822299d1762916e4b7d414d6b4 100644 (file)
@@ -507,8 +507,7 @@ hvm_get_cpl(struct vcpu *v)
     (has_hvm_params(d) ? (d)->arch.hvm.params[HVM_PARAM_VIRIDIAN] : 0)
 
 #define is_viridian_domain(d) \
-    (IS_ENABLED(CONFIG_HVM_VIRIDIAN) && \
-     is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))
+    (is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))
 
 #define is_viridian_vcpu(v) \
     is_viridian_domain((v)->domain)
index 4c5a76a707a14f440150e013485632df8feaca41..196fed6d5de54c59c53a3a6a41fbfaf84ee41262 100644 (file)
@@ -172,9 +172,7 @@ struct hvm_vcpu {
     /* Pending hw/sw interrupt (.vector = -1 means nothing pending). */
     struct x86_event     inject_event;
 
-#ifdef CONFIG_HVM_VIRIDIAN
     struct viridian_vcpu *viridian;
-#endif
 };
 
 #endif /* __ASM_X86_HVM_VCPU_H__ */