#include <xen/iocap.h>
#include <xen/paging.h>
#include <asm/irq.h>
+#include <asm/hvm/emulate.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/processor.h>
else
{
vcpu_pause(v);
+
v->arch.xcr0 = _xcr0;
v->arch.xcr0_accum = _xcr0_accum;
v->arch.nonlazy_xstate_used = _xcr0_accum & XSTATE_NONLAZY;
compress_xsave_states(v, _xsave_area,
evc->size - PV_XSAVE_HDR_SIZE);
+
+ if ( is_hvm_domain(d) )
+ hvmemul_cancel(v);
+
vcpu_unpause(v);
}
#include <xen/paging.h>
#include <xen/sched.h>
+#include <asm/hvm/emulate.h>
+
#include <public/hvm/hvm_vcpu.h>
static int check_segment(struct segment_register *reg, enum x86_segment seg)
paging_update_paging_modes(v);
+ hvmemul_cancel(v);
+
v->is_initialised = 1;
set_bit(_VPF_down, &v->pause_flags);
.ops = &ioreq_server_ops
};
+/*
+ * Drop all records of in-flight emulation. This is needed whenever a vCPU's
+ * register state may have changed behind the emulator's back.
+ */
+void hvmemul_cancel(struct vcpu *v)
+{
+ struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
+
+ /* Abandon any pending ioreq and its scheduled completion handling. */
+ vio->io_req.state = STATE_IOREQ_NONE;
+ vio->io_completion = HVMIO_no_completion;
+ /* Invalidate cached MMIO state and partially-fetched insn bytes. */
+ vio->mmio_cache_count = 0;
+ vio->mmio_insn_bytes = 0;
+ vio->mmio_access = (struct npfec){};
+ vio->mmio_retry = false;
+ /* NOTE(review): presumably a cached guest-to-machine ioport mapping. */
+ vio->g2m_ioport = NULL;
+}
+
static int hvmemul_do_io(
bool_t is_mmio, paddr_t addr, unsigned long *reps, unsigned int size,
uint8_t dir, bool_t df, bool_t data_is_addr, uintptr_t data)
return tsc + v->arch.hvm.cache_tsc_offset;
}
+/*
+ * Propagate updated register state to the vendor layer (when it provides a
+ * hook), then cancel any in-flight emulation: the state the emulator was
+ * working from may now be stale.  Out-of-line (was a static inline) so it
+ * can call hvmemul_cancel() without the header depending on emulate.h.
+ */
+void hvm_set_info_guest(struct vcpu *v)
+{
+ if ( hvm_funcs.set_info_guest )
+ alternative_vcall(hvm_funcs.set_info_guest, v);
+
+ hvmemul_cancel(v);
+}
+
void hvm_migrate_timers(struct vcpu *v)
{
rtc_migrate_timers(v);
v->arch.dr6 = ctxt.dr6;
v->arch.dr7 = ctxt.dr7;
+ hvmemul_cancel(v);
+
/* Auxiliary processors should be woken immediately. */
v->is_initialised = 1;
clear_bit(_VPF_down, &v->pause_flags);
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/vpmu.h>
+#include <asm/hvm/emulate.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
BUG();
}
+ hvmemul_cancel(target);
+
vcpu_unpause(target);
}
unsigned int insn_bytes);
void hvm_emulate_writeback(
struct hvm_emulate_ctxt *hvmemul_ctxt);
+void hvmemul_cancel(struct vcpu *v);
struct segment_register *hvmemul_get_seg_reg(
enum x86_segment seg,
struct hvm_emulate_ctxt *hvmemul_ctxt);
void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
struct segment_register *reg);
+void hvm_set_info_guest(struct vcpu *v);
+
bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val);
int hvm_vmexit_cpuid(struct cpu_user_regs *regs, unsigned int inst_len);
? alternative_call(hvm_funcs.get_insn_bytes, v, buf) : 0);
}
-static inline void hvm_set_info_guest(struct vcpu *v)
-{
- if ( hvm_funcs.set_info_guest )
- alternative_vcall(hvm_funcs.set_info_guest, v);
-}
-
static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
{
#ifndef NDEBUG
*/
int hvm_guest_x86_mode(struct vcpu *v);
unsigned long hvm_get_shadow_gs_base(struct vcpu *v);
-void hvm_set_info_guest(struct vcpu *v);
void hvm_cpuid_policy_changed(struct vcpu *v);
void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset, uint64_t at_tsc);
bool hvm_get_guest_bndcfgs(struct vcpu *v, uint64_t *val);