Add config option HVM_VIRIDIAN that covers viridian code within HVM.
Calls to viridian functions are guarded by is_viridian_domain() and related macros.
Having this option may be beneficial, as it reduces the code footprint on systems
that do not use Hyper-V.
Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com>
Reviewed-by: Alejandro Vallejo <alejandro.vallejo@cloud.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
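
As an illustration of the guard pattern relied on here, below is a minimal,
self-contained sketch (hypothetical names, not Xen's). When the option is
off, the IS_ENABLED()-style constant folds to 0, the guarded call becomes
dead code, and (with -O1 or above) the compiler drops the reference so no
definition of the function is needed at link time:

/*
 * Sketch of the compile-time guard pattern (hypothetical names).
 * Build with -O1 or higher: the optimizer must eliminate the guarded
 * call so the missing definition never reaches the linker when the
 * feature is compiled out.
 */
#include <stdio.h>

/* Comment this out to emulate a build with the option disabled. */
#define CONFIG_FEATURE

#ifdef CONFIG_FEATURE
# define FEATURE_ENABLED 1
#else
# define FEATURE_ENABLED 0
#endif

struct domain { int feature_requested; };

/* The declaration stays visible either way, so guarded calls type-check. */
int feature_init(struct domain *d);

#if FEATURE_ENABLED
int feature_init(struct domain *d)
{
    printf("feature initialised (requested=%d)\n", d->feature_requested);
    return 0;
}
#endif

/* Mirrors is_viridian_domain(): constant 0 when the option is off. */
#define is_feature_domain(d) (FEATURE_ENABLED && (d)->feature_requested)

int main(void)
{
    struct domain d = { .feature_requested = 1 };

    if ( is_feature_domain(&d) )  /* folded away when disabled */
        feature_init(&d);

    return 0;
}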
If unsure, stay with defaults.
+config HVM_VIRIDIAN
+ bool "Hyper-V enlightenments for guests" if EXPERT
+ default y
+ help
+ Support optimizations for Hyper-V guests such as faster hypercalls,
+ efficient timer and interrupt handling, and enhanced paravirtualized
+ I/O. This improves performance and compatibility of Windows VMs.
+
+ If unsure, say Y.
+
config MEM_PAGING
bool "Xen memory paging support (UNSUPPORTED)" if UNSUPPORTED
depends on VM_EVENT
obj-$(CONFIG_AMD_SVM) += svm/
obj-$(CONFIG_INTEL_VMX) += vmx/
-obj-y += viridian/
+obj-$(CONFIG_HVM_VIRIDIAN) += viridian/
obj-y += asid.o
obj-y += dm.o
if ( hvm_tsc_scaling_supported )
d->arch.hvm.tsc_scaling_ratio = hvm_default_tsc_scaling_ratio;
- rc = viridian_domain_init(d);
- if ( rc )
- goto fail2;
+ if ( is_viridian_domain(d) )
+ {
+ rc = viridian_domain_init(d);
+ if ( rc )
+ goto fail2;
+ }
rc = alternative_call(hvm_funcs.domain_initialise, d);
if ( rc != 0 )
if ( hvm_funcs.nhvm_domain_relinquish_resources )
alternative_vcall(hvm_funcs.nhvm_domain_relinquish_resources, d);
- viridian_domain_deinit(d);
+ if ( is_viridian_domain(d) )
+ viridian_domain_deinit(d);
ioreq_server_destroy_all(d);
&& (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
goto fail5;
- rc = viridian_vcpu_init(v);
- if ( rc )
- goto fail6;
+ if ( is_viridian_domain(d) )
+ {
+ rc = viridian_vcpu_init(v);
+ if ( rc )
+ goto fail6;
+ }
rc = ioreq_server_add_vcpu_all(d, v);
if ( rc != 0 )
fail2:
hvm_vcpu_cacheattr_destroy(v);
fail1:
- viridian_vcpu_deinit(v);
+ if ( is_viridian_domain(d) )
+ viridian_vcpu_deinit(v);
return rc;
}
void hvm_vcpu_destroy(struct vcpu *v)
{
- viridian_vcpu_deinit(v);
+ if ( is_viridian_domain(v->domain) )
+ viridian_vcpu_deinit(v);
ioreq_server_remove_vcpu_all(v->domain, v);
* priority vector and then recurse to handle the lower priority
* vector.
*/
- bool missed_eoi = viridian_apic_assist_completed(v);
+ bool missed_eoi = has_viridian_apic_assist(v->domain) &&
+ viridian_apic_assist_completed(v);
int vector;
again:
* NOTE: It is harmless to call viridian_apic_assist_clear() on a
* recursion, even though it is not necessary.
*/
- if ( !missed_eoi )
+ if ( has_viridian_apic_assist(v->domain) && !missed_eoi )
viridian_apic_assist_clear(v);
vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]);
* If so, we need to emulate the EOI here before comparing ISR
* with IRR.
*/
- if ( viridian_apic_assist_completed(v) )
+ if ( has_viridian_apic_assist(v->domain) &&
+ viridian_apic_assist_completed(v) )
vlapic_EOI_set(vlapic);
isr = vlapic_find_highest_isr(vlapic);
if ( isr >= 0 &&
(irr & 0xf0) <= (isr & 0xf0) )
{
- viridian_apic_assist_clear(v);
+ if ( has_viridian_apic_assist(v->domain) )
+ viridian_apic_assist_clear(v);
return -1;
}
/* hypervisor intercepted msix table */
struct list_head msixtbl_list;
+#ifdef CONFIG_HVM_VIRIDIAN
struct viridian_domain *viridian;
+#endif
/*
* TSC value that VCPUs use to calculate their tsc_offset value.
(has_hvm_params(d) ? (d)->arch.hvm.params[HVM_PARAM_VIRIDIAN] : 0)
#define is_viridian_domain(d) \
- (is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))
+ (IS_ENABLED(CONFIG_HVM_VIRIDIAN) && \
+ is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))
#define is_viridian_vcpu(v) \
is_viridian_domain((v)->domain)
/* Pending hw/sw interrupt (.vector = -1 means nothing pending). */
struct x86_event inject_event;
+#ifdef CONFIG_HVM_VIRIDIAN
struct viridian_vcpu *viridian;
+#endif
};
#endif /* __ASM_X86_HVM_VCPU_H__ */
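
With the option in place, a non-Hyper-V build can drop the viridian code
entirely. A sketch of a .config fragment doing so (the prompt is hidden
behind the "if EXPERT" qualifier above, and CONFIG_EXPERT is assumed to
be available on the target tree):

CONFIG_EXPERT=y
# CONFIG_HVM_VIRIDIAN is not set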