APIC virtualization support is currently implemented only for Intel VT-x.
To aid future work on separating AMD-V from Intel VT-x code, instead of
calling vmx_vlapic_msr_changed() directly from common HVM code, add a hook
named update_vlapic_mode to the hvm_function_table, and create a wrapper
function, hvm_update_vlapic_mode(), for common HVM code to use.
With the above change, the asm/hvm/vmx/vmx.h header is no longer required,
so drop its inclusion, and resolve the resulting build errors about implicit
declarations of ‘TRACE_2_LONG_3D’ and ‘TRC_PAR_LONG’ by including the
missing asm/hvm/trace.h header.
No functional change intended.
Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
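
For illustration only, below is a minimal standalone sketch of the
hook-plus-wrapper pattern this patch introduces. The struct layout and the
vcpu stub are simplified placeholders, not the real Xen definitions; only
the hook and wrapper names mirror the patch.

    #include <stdio.h>

    struct vcpu { int id; };

    struct hvm_function_table {
        void (*update_vlapic_mode)(struct vcpu *v);   /* optional vendor hook */
    };

    /* VMX-only implementation, installed via its function table. */
    static void vmx_vlapic_msr_changed(struct vcpu *v)
    {
        printf("vcpu%d: reprogram APIC virtualization controls\n", v->id);
    }

    static struct hvm_function_table hvm_funcs = {
        .update_vlapic_mode = vmx_vlapic_msr_changed,
    };

    /* Wrapper used by common code: a no-op when the hook is not provided. */
    static void hvm_update_vlapic_mode(struct vcpu *v)
    {
        if ( hvm_funcs.update_vlapic_mode )
            hvm_funcs.update_vlapic_mode(v);
    }

    int main(void)
    {
        struct vcpu v = { .id = 0 };

        hvm_update_vlapic_mode(&v);          /* dispatches to the VMX hook */

        hvm_funcs.update_vlapic_mode = NULL; /* vendor without the hook */
        hvm_update_vlapic_mode(&v);          /* harmless no-op */

        return 0;
    }

The NULL check in the wrapper is what keeps common code vendor-agnostic:
a vendor that does not provide the hook simply gets a no-op.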
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
-#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/nestedhvm.h>
+#include <asm/hvm/trace.h>
#include <asm/hvm/viridian.h>
#include <public/hvm/ioreq.h>
#include <public/hvm/params.h>
if ( vlapic_x2apic_mode(vlapic) )
set_x2apic_id(vlapic);
- vmx_vlapic_msr_changed(vlapic_vcpu(vlapic));
+ hvm_update_vlapic_mode(vlapic_vcpu(vlapic));
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
"apic base msr is 0x%016"PRIx64, vlapic->hw.apic_base_msr);
unlikely(vlapic_x2apic_mode(s)) )
return -EINVAL;
- vmx_vlapic_msr_changed(v);
+ hvm_update_vlapic_mode(v);
return 0;
}
.nhvm_vcpu_vmexit_event = nvmx_vmexit_event,
.nhvm_intr_blocked = nvmx_intr_blocked,
.nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
+ .update_vlapic_mode = vmx_vlapic_msr_changed,
.nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
.enable_msr_interception = vmx_enable_msr_interception,
.altp2m_vcpu_update_p2m = vmx_vcpu_update_eptp,
vmx_vmcs_exit(v);
}
-void vmx_vlapic_msr_changed(struct vcpu *v)
+void cf_check vmx_vlapic_msr_changed(struct vcpu *v)
{
int virtualize_x2apic_mode;
struct vlapic *vlapic = vcpu_vlapic(v);
void (*handle_eoi)(uint8_t vector, int isr);
int (*pi_update_irte)(const struct vcpu *v, const struct pirq *pirq,
uint8_t gvec);
+ void (*update_vlapic_mode)(struct vcpu *v);
/*Walk nested p2m */
int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
return alternative_call(hvm_funcs.pi_update_irte, v, pirq, gvec);
}
+static inline void hvm_update_vlapic_mode(struct vcpu *v)
+{
+ if ( hvm_funcs.update_vlapic_mode )
+ alternative_vcall(hvm_funcs.update_vlapic_mode, v);
+}
+
#else /* CONFIG_HVM */
#define hvm_enabled false
void vmx_asm_vmexit_handler(struct cpu_user_regs);
void vmx_intr_assist(void);
void noreturn cf_check vmx_do_resume(void);
-void vmx_vlapic_msr_changed(struct vcpu *v);
+void cf_check vmx_vlapic_msr_changed(struct vcpu *v);
struct hvm_emulate_ctxt;
void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt);
void vmx_realmode(struct cpu_user_regs *regs);