    }
}

+void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp)
+{
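+    /*
+     * Note: on x86-64 the e-named user_regs fields are union aliases of
+     * the full 64-bit r-named registers (see __DECL_REG in the public
+     * headers), so these assignments copy all 64 bits without truncation.
+     */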
+    v->arch.user_regs.eax = rsp->data.regs.x86.rax;
+    v->arch.user_regs.ebx = rsp->data.regs.x86.rbx;
+    v->arch.user_regs.ecx = rsp->data.regs.x86.rcx;
+    v->arch.user_regs.edx = rsp->data.regs.x86.rdx;
+    v->arch.user_regs.esp = rsp->data.regs.x86.rsp;
+    v->arch.user_regs.ebp = rsp->data.regs.x86.rbp;
+    v->arch.user_regs.esi = rsp->data.regs.x86.rsi;
+    v->arch.user_regs.edi = rsp->data.regs.x86.rdi;
+
+    v->arch.user_regs.r8 = rsp->data.regs.x86.r8;
+    v->arch.user_regs.r9 = rsp->data.regs.x86.r9;
+    v->arch.user_regs.r10 = rsp->data.regs.x86.r10;
+    v->arch.user_regs.r11 = rsp->data.regs.x86.r11;
+    v->arch.user_regs.r12 = rsp->data.regs.x86.r12;
+    v->arch.user_regs.r13 = rsp->data.regs.x86.r13;
+    v->arch.user_regs.r14 = rsp->data.regs.x86.r14;
+    v->arch.user_regs.r15 = rsp->data.regs.x86.r15;
+
+    v->arch.user_regs.eflags = rsp->data.regs.x86.rflags;
+    v->arch.user_regs.eip = rsp->data.regs.x86.rip;
+}
+
/*
* Local variables:
* mode: C
        if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
        {
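+            /*
+             * Apply any new register state first, so the vCPU resumes
+             * with it in place.
+             */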
+            if ( rsp.flags & VM_EVENT_FLAG_SET_REGISTERS )
+                vm_event_set_registers(v, &rsp);
+
            if ( rsp.flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP )
                vm_event_toggle_singlestep(d, v);
        }
+static inline
+void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp)
+{
+    /* Not supported on ARM; stub keeps the common vm_event_resume() path arch-neutral. */
+}
+
#endif /* __ASM_ARM_VM_EVENT_H__ */
void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp);
+void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp);
+
#endif /* __ASM_X86_VM_EVENT_H__ */
* by the altp2m_idx response field if possible.
*/
#define VM_EVENT_FLAG_ALTERNATE_P2M (1 << 7)
+/*
+ * Set the vCPU registers to the values in the vm_event response.
+ * At the moment x86-only, applies to RAX-RDX, RSP, RBP, RSI, RDI,
+ * R8-R15, RFLAGS, and RIP.
+ * Requires the vCPU to be paused already (synchronous events only).
+ */
+#define VM_EVENT_FLAG_SET_REGISTERS (1 << 8)
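
For illustration, a minimal consumer-side sketch of the new flag. The helper
names get_request()/put_response(), the ring variable, and trampoline_gva are
hypothetical; the struct layout is the one defined in this header. Assumes a
synchronous event type that reports register state:

    vm_event_request_t req;
    vm_event_response_t rsp;

    get_request(&ring, &req);                   /* hypothetical ring helper */

    memset(&rsp, 0, sizeof(rsp));
    rsp.version = VM_EVENT_INTERFACE_VERSION;
    rsp.vcpu_id = req.vcpu_id;
    rsp.reason  = req.reason;
    rsp.flags   = (req.flags & VM_EVENT_FLAG_VCPU_PAUSED) |
                  VM_EVENT_FLAG_SET_REGISTERS;

    rsp.data.regs.x86     = req.data.regs.x86;  /* start from reported state */
    rsp.data.regs.x86.rip = trampoline_gva;     /* hypothetical redirect */

    put_response(&ring, &rsp);                  /* vCPU resumes with new RIP */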
/*
* Reasons for the vm event request