static int hvmemul_wbinvd(
struct x86_emulate_ctxt *ctxt)
{
- hvm_funcs.wbinvd_intercept();
+ alternative_vcall(hvm_funcs.wbinvd_intercept);
return X86EMUL_OKAY;
}
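Throughout this patch the conversion follows one calling convention: hooks whose return value is consumed are wrapped in alternative_call(), while void hooks (or hooks whose result is ignored) use alternative_vcall(); in both cases the hvm_funcs member becomes the first macro argument and the original call arguments follow it. A minimal sketch of the two forms, assuming alternative_call()/alternative_vcall() are in scope; the hook names are made up for illustration and are not real hvm_funcs members:

    /*
     * Illustration only: example_void_hook and example_int_hook are
     * hypothetical hvm_funcs members, not fields touched by this patch.
     */
    static void example_usage(struct vcpu *v, uint64_t val)
    {
        /* Result ignored / void hook: alternative_vcall(). */
        alternative_vcall(hvm_funcs.example_void_hook, v, val);

        /* Result consumed: alternative_call(). */
        if ( alternative_call(hvm_funcs.example_int_hook, v, val) )
            /* ... act on the hook's return value ... */;
    }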
struct vcpu *curr = current;
if ( !curr->fpu_dirtied )
- hvm_funcs.fpu_dirty_intercept();
+ alternative_vcall(hvm_funcs.fpu_dirty_intercept);
else if ( type == X86EMUL_FPU_fpu )
{
const typeof(curr->arch.xsave_area->fpu_sse) *fpu_ctxt = ctxt->data;
{
curr->fpu_dirtied = false;
stts();
- hvm_funcs.fpu_leave(curr);
+ alternative_vcall(hvm_funcs.fpu_leave, curr);
}
}
}
if ( hvmemul_ctxt->intr_shadow != new_intr_shadow )
{
hvmemul_ctxt->intr_shadow = new_intr_shadow;
- hvm_funcs.set_interrupt_shadow(curr, new_intr_shadow);
+ alternative_vcall(hvm_funcs.set_interrupt_shadow,
+ curr, new_intr_shadow);
}
if ( hvmemul_ctxt->ctxt.retire.hlt &&
memset(hvmemul_ctxt, 0, sizeof(*hvmemul_ctxt));
- hvmemul_ctxt->intr_shadow = hvm_funcs.get_interrupt_shadow(curr);
+ hvmemul_ctxt->intr_shadow =
+ alternative_call(hvm_funcs.get_interrupt_shadow, curr);
hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
struct vcpu *v;
for_each_vcpu ( d, v )
- hvm_funcs.set_rdtsc_exiting(v, enable);
+ alternative_vcall(hvm_funcs.set_rdtsc_exiting, v, enable);
}
void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat)
{
- if ( !hvm_funcs.get_guest_pat(v, guest_pat) )
+ if ( !alternative_call(hvm_funcs.get_guest_pat, v, guest_pat) )
*guest_pat = v->arch.hvm.pat_cr;
}
return 0;
}
- if ( !hvm_funcs.set_guest_pat(v, guest_pat) )
+ if ( !alternative_call(hvm_funcs.set_guest_pat, v, guest_pat) )
v->arch.hvm.pat_cr = guest_pat;
return 1;
/* nothing, best effort only */;
}
- return hvm_funcs.set_guest_bndcfgs(v, val);
+ return alternative_call(hvm_funcs.set_guest_bndcfgs, v, val);
}
/*
static bool hvm_get_pending_event(struct vcpu *v, struct x86_event *info)
{
info->cr2 = v->arch.hvm.guest_cr[2];
- return hvm_funcs.get_pending_event(v, info);
+
+ return alternative_call(hvm_funcs.get_pending_event, v, info);
}
void hvm_do_resume(struct vcpu *v)
}
}
- hvm_funcs.inject_event(event);
+ alternative_vcall(hvm_funcs.inject_event, event);
}
int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
(!rangeset_is_empty(d->iomem_caps) ||
!rangeset_is_empty(d->arch.ioport_caps) ||
has_arch_pdevs(d)) )
- hvm_funcs.handle_cd(v, value);
+ alternative_vcall(hvm_funcs.handle_cd, v, value);
hvm_update_cr(v, 0, value);
goto gp_fault;
/* If ret == 0 then this is not an MCE MSR, see other MSRs. */
ret = ((ret == 0)
- ? hvm_funcs.msr_read_intercept(msr, msr_content)
+ ? alternative_call(hvm_funcs.msr_read_intercept,
+ msr, msr_content)
: X86EMUL_OKAY);
break;
}
goto gp_fault;
/* If ret == 0 then this is not an MCE MSR, see other MSRs. */
ret = ((ret == 0)
- ? hvm_funcs.msr_write_intercept(msr, msr_content)
+ ? alternative_call(hvm_funcs.msr_write_intercept,
+ msr, msr_content)
: X86EMUL_OKAY);
break;
}
void *hypercall_page)
{
hvm_latch_shinfo_size(d);
- hvm_funcs.init_hypercall_page(d, hypercall_page);
+ alternative_vcall(hvm_funcs.init_hypercall_page, d, hypercall_page);
}
void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
struct segment_register *reg)
{
- hvm_funcs.get_segment_register(v, seg, reg);
+ alternative_vcall(hvm_funcs.get_segment_register, v, seg, reg);
switch ( seg )
{
return;
}
- hvm_funcs.set_segment_register(v, seg, reg);
+ alternative_vcall(hvm_funcs.set_segment_register, v, seg, reg);
}
/*
hvm_guest_x86_mode(struct vcpu *v)
{
ASSERT(v == current);
- return hvm_funcs.guest_x86_mode(v);
+ return alternative_call(hvm_funcs.guest_x86_mode, v);
}
static inline void
hvm_update_host_cr3(struct vcpu *v)
{
if ( hvm_funcs.update_host_cr3 )
- hvm_funcs.update_host_cr3(v);
+ alternative_vcall(hvm_funcs.update_host_cr3, v);
}
static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
{
- hvm_funcs.update_guest_cr(v, cr, 0);
+ alternative_vcall(hvm_funcs.update_guest_cr, v, cr, 0);
}
static inline void hvm_update_guest_cr3(struct vcpu *v, bool noflush)
{
unsigned int flags = noflush ? HVM_UPDATE_GUEST_CR3_NOFLUSH : 0;
- hvm_funcs.update_guest_cr(v, 3, flags);
+ alternative_vcall(hvm_funcs.update_guest_cr, v, 3, flags);
}
static inline void hvm_update_guest_efer(struct vcpu *v)
{
- hvm_funcs.update_guest_efer(v);
+ alternative_vcall(hvm_funcs.update_guest_efer, v);
}
static inline void hvm_cpuid_policy_changed(struct vcpu *v)
{
- hvm_funcs.cpuid_policy_changed(v);
+ alternative_vcall(hvm_funcs.cpuid_policy_changed, v);
}
static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset,
uint64_t at_tsc)
{
- hvm_funcs.set_tsc_offset(v, offset, at_tsc);
+ alternative_vcall(hvm_funcs.set_tsc_offset, v, offset, at_tsc);
}
/*
static inline unsigned int
hvm_get_cpl(struct vcpu *v)
{
- return hvm_funcs.get_cpl(v);
+ return alternative_call(hvm_funcs.get_cpl, v);
}
static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v)
{
- return hvm_funcs.get_shadow_gs_base(v);
+ return alternative_call(hvm_funcs.get_shadow_gs_base, v);
}
static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
{
return hvm_funcs.get_guest_bndcfgs &&
- hvm_funcs.get_guest_bndcfgs(v, val);
+ alternative_call(hvm_funcs.get_guest_bndcfgs, v, val);
}
#define has_hvm_params(d) \
static inline bool hvm_event_pending(const struct vcpu *v)
{
- return hvm_funcs.event_pending(v);
+ return alternative_call(hvm_funcs.event_pending, v);
}
static inline void hvm_invlpg(struct vcpu *v, unsigned long linear)
{
- hvm_funcs.invlpg(v, linear);
+ alternative_vcall(hvm_funcs.invlpg, v, linear);
}
/* These bits in CR4 are owned by the host. */
static inline unsigned int hvm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
{
- return (hvm_funcs.get_insn_bytes ? hvm_funcs.get_insn_bytes(v, buf) : 0);
+ return (hvm_funcs.get_insn_bytes
+ ? alternative_call(hvm_funcs.get_insn_bytes, v, buf) : 0);
}
static inline void hvm_set_info_guest(struct vcpu *v)
{
if ( hvm_funcs.set_info_guest )
- return hvm_funcs.set_info_guest(v);
+ alternative_vcall(hvm_funcs.set_info_guest, v);
}
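Note that optional hooks keep their presence check: only the call itself is patched, so members that may be NULL (update_host_cr3, get_insn_bytes, set_info_guest and get_guest_bndcfgs above) are still tested before going through alternative_call()/alternative_vcall(). A sketch of that shape, again using a hypothetical member name:

    /* Illustration only: optional_hook is a hypothetical, possibly-NULL member. */
    static inline void example_optional(struct vcpu *v)
    {
        if ( hvm_funcs.optional_hook )
            alternative_vcall(hvm_funcs.optional_hook, v);
    }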
static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)